// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! The top-level channel management and payment tracking stuff lives here.
//!
//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
//! responsible for tracking which channels are open, which HTLCs are in flight, and
//! reestablishing those channels upon reconnect to the relevant peer(s).
//!
//! It does not manage routing logic (see routing::router::get_route for that) nor does it manage
//! constructing on-chain transactions (it only monitors the chain to watch for any force-closes
//! that might imply it needs to fail HTLCs/payments/channels it manages).
//!

use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;

use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::cmp::fixed_time_eq;
use bitcoin::hash_types::BlockHash;

use bitcoin::secp256k1::key::{SecretKey, PublicKey};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1;

use chain;
use chain::Watch;
use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use chain::transaction::{OutPoint, TransactionData};
use ln::channel::{Channel, ChannelError};
use ln::features::{InitFeatures, NodeFeatures};
use routing::router::{Route, RouteHop};
use ln::msgs;
use ln::msgs::NetAddress;
use ln::onion_utils;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys};
use util::config::UserConfig;
use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::{byte_utils, events};
use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
use util::chacha20::{ChaCha20, ChaChaReader};
use util::logger::Logger;
use util::errors::APIError;

use std::{cmp, mem};
use std::collections::{HashMap, hash_map, HashSet};
use std::io::{Cursor, Read};
use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
#[cfg(any(test, feature = "allow_wallclock_use"))]
use std::time::Instant;
use std::marker::{Sync, Send};
use std::ops::Deref;
use bitcoin::hashes::hex::ToHex;

// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
// forward the HTLC with information it will give back to us when it does so, or if it should Fail
// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
//
// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the
// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo
// with it to track where it came from (in case of onwards-forward error), waiting a random delay
// before we forward it.
//
// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a
// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use
// to either fail-backwards or fulfill the HTLC backwards along the relevant path).
// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
// our payment, which we can use to decode errors or inform the user that the payment was sent.
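//
// As a rough sketch of that flow for a forwarded HTLC (illustrative only, pattern-matching the
// types defined below rather than real calls):
//   PendingHTLCStatus::Forward(PendingHTLCInfo { routing: PendingHTLCRouting::Forward { .. }, .. })
//     -> HTLCForwardInfo::AddHTLC { forward_info, prev_short_channel_id, prev_htlc_id, .. }
//     -> outbound HTLC carrying HTLCSource::PreviousHopData(HTLCPreviousHopData { .. })
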
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
enum PendingHTLCRouting {
	Forward {
		onion_packet: msgs::OnionPacket,
		short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
	},
	Receive {
		payment_data: Option<msgs::FinalOnionHopData>,
		incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
	},
}

#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) struct PendingHTLCInfo {
	routing: PendingHTLCRouting,
	incoming_shared_secret: [u8; 32],
	payment_hash: PaymentHash,
	pub(super) amt_to_forward: u64,
	pub(super) outgoing_cltv_value: u32,
}

#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) enum HTLCFailureMsg {
	Relay(msgs::UpdateFailHTLC),
	Malformed(msgs::UpdateFailMalformedHTLC),
}

/// Stores whether we can't forward an HTLC or relevant forwarding info
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) enum PendingHTLCStatus {
	Forward(PendingHTLCInfo),
	Fail(HTLCFailureMsg),
}

pub(super) enum HTLCForwardInfo {
	AddHTLC {
		forward_info: PendingHTLCInfo,
		// These fields are produced in `forward_htlcs()` and consumed in
		// `process_pending_htlc_forwards()` for constructing the
		// `HTLCSource::PreviousHopData` for failed and forwarded
		// HTLCs.
		prev_short_channel_id: u64,
		prev_htlc_id: u64,
		prev_funding_outpoint: OutPoint,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}

/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, PartialEq)]
pub(crate) struct HTLCPreviousHopData {
	short_channel_id: u64,
	htlc_id: u64,
	incoming_packet_shared_secret: [u8; 32],
	// This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
	// channel with a preimage provided by the forward channel.
	outpoint: OutPoint,
}

struct ClaimableHTLC {
	prev_hop: HTLCPreviousHopData,
	value: u64,
	/// Filled in when the HTLC was received with a payment_secret packet, which contains a
	/// total_msat (which may differ from value if this is a Multi-Path Payment) and a
	/// payment_secret which prevents path-probing attacks and can associate different HTLCs which
	/// are part of the same payment.
	payment_data: Option<msgs::FinalOnionHopData>,
	cltv_expiry: u32,
}

/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, PartialEq)]
pub(crate) enum HTLCSource {
	PreviousHopData(HTLCPreviousHopData),
	OutboundRoute {
		path: Vec<RouteHop>,
		session_priv: SecretKey,
		/// Technically we can recalculate this from the route, but we cache it here to avoid
		/// doing a double-pass on route when we get a failure back
		first_hop_htlc_msat: u64,
	},
}
#[cfg(test)]
impl HTLCSource {
	pub fn dummy() -> Self {
		HTLCSource::OutboundRoute {
			path: Vec::new(),
			session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
			first_hop_htlc_msat: 0,
		}
	}
}

#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) enum HTLCFailReason {
	LightningError {
		err: msgs::OnionErrorPacket,
	},
	Reason {
		failure_code: u16,
		data: Vec<u8>,
	}
}

/// payment_hash type, used to cross-lock hops
/// (C-not exported) as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentHash(pub [u8; 32]);
/// payment_preimage type, used to route payments between hops
/// (C-not exported) as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentPreimage(pub [u8; 32]);
/// payment_secret type, used to authenticate the sender to the receiver and tie MPP HTLCs together
/// (C-not exported) as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentSecret(pub [u8; 32]);
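
// As a reminder of how the first two relate (a sketch; `Sha256` is the hash import above):
//   let preimage = PaymentPreimage([42; 32]);
//   let hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
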
type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);

/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
/// immediately (ie with no further calls on it made). Thus, this step happens inside a
/// channel_state lock. We then return the set of things that need to be done outside the lock in
/// this struct and call handle_error!() on it.
struct MsgHandleErrInternal {
	err: msgs::LightningError,
	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
}
impl MsgHandleErrInternal {
	#[inline]
	fn send_err_msg_no_close(err: String, channel_id: [u8; 32]) -> Self {
		Self {
			err: LightningError {
				err: err.clone(),
				action: msgs::ErrorAction::SendErrorMessage {
					msg: msgs::ErrorMessage {
						channel_id,
						data: err
					},
				},
			},
			shutdown_finish: None,
		}
	}
	#[inline]
	fn ignore_no_close(err: String) -> Self {
		Self {
			err: LightningError {
				err,
				action: msgs::ErrorAction::IgnoreError,
			},
			shutdown_finish: None,
		}
	}
	#[inline]
	fn from_no_close(err: msgs::LightningError) -> Self {
		Self { err, shutdown_finish: None }
	}
	#[inline]
	fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
		Self {
			err: LightningError {
				err: err.clone(),
				action: msgs::ErrorAction::SendErrorMessage {
					msg: msgs::ErrorMessage {
						channel_id,
						data: err
					},
				},
			},
			shutdown_finish: Some((shutdown_res, channel_update)),
		}
	}
	#[inline]
	fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
		Self {
			err: match err {
				ChannelError::Ignore(msg) => LightningError {
					err: msg,
					action: msgs::ErrorAction::IgnoreError,
				},
				ChannelError::Close(msg) => LightningError {
					err: msg.clone(),
					action: msgs::ErrorAction::SendErrorMessage {
						msg: msgs::ErrorMessage {
							channel_id,
							data: msg
						},
					},
				},
				ChannelError::CloseDelayBroadcast(msg) => LightningError {
					err: msg.clone(),
					action: msgs::ErrorAction::SendErrorMessage {
						msg: msgs::ErrorMessage {
							channel_id,
							data: msg
						},
					},
				},
			},
			shutdown_finish: None,
		}
	}
}

/// We hold back HTLCs we intend to relay for a random interval greater than this (see
/// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;

/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
/// be sent in the order they appear in the return value, however sometimes the order needs to be
/// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
/// they were originally sent). In those cases, this enum is also returned.
#[derive(Clone, PartialEq)]
pub(super) enum RAACommitmentOrder {
	/// Send the CommitmentUpdate messages first
	CommitmentFirst,
	/// Send the RevokeAndACK message first
	RevokeAndACKFirst,
}

// Note this is only exposed in cfg(test):
pub(super) struct ChannelHolder<ChanSigner: ChannelKeys> {
	pub(super) by_id: HashMap<[u8; 32], Channel<ChanSigner>>,
	pub(super) short_to_id: HashMap<u64, [u8; 32]>,
	/// short channel id -> forward infos. Key of 0 means payments received
	/// Note that while this is held in the same mutex as the channels themselves, no consistency
	/// guarantees are made about the existence of a channel with the short id here, nor the short
	/// ids in the PendingHTLCInfo!
	pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
	/// (payment_hash, payment_secret) -> Vec<HTLCs> for tracking HTLCs that
	/// were to us and can be failed/claimed by the user
	/// Note that while this is held in the same mutex as the channels themselves, no consistency
	/// guarantees are made about the channels given here actually existing anymore by the time you
	/// go to read them!
	claimable_htlcs: HashMap<(PaymentHash, Option<PaymentSecret>), Vec<ClaimableHTLC>>,
	/// Messages to send to peers - pushed to in the same lock that they are generated in (except
	/// for broadcast messages, where ordering isn't as strict).
	pub(super) pending_msg_events: Vec<MessageSendEvent>,
}

/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
/// the latest Init features we heard from the peer.
struct PeerState {
	latest_features: InitFeatures,
}

#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";

/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
/// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents
/// issues such as overly long function definitions. Note that the ChannelManager can take any
/// type that implements KeysInterface for its keys manager, but this type alias chooses the
/// concrete type of the KeysManager.
pub type SimpleArcChannelManager<M, T, F, L> = Arc<ChannelManager<InMemoryChannelKeys, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>>;

/// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
/// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't
/// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
/// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
/// helps with issues such as long function definitions. Note that the ChannelManager can take any
/// type that implements KeysInterface for its keys manager, but this type alias chooses the
/// concrete type of the KeysManager.
pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<InMemoryChannelKeys, &'a M, &'b T, &'c KeysManager, &'d F, &'e L>;
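
// For example, a node-wide manager type might be declared as (a sketch; `MyChainMonitor`,
// `MyBroadcaster`, `MyFeeEstimator` and `MyLogger` are hypothetical stand-ins for your own
// implementations of the corresponding traits):
//   type NodeManager = SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
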
/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
///
/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
/// to individual Channels.
///
/// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for
/// all peers during write/read (though does not modify this instance, only the instance being
/// serialized). This will result in any channels which have not yet exchanged funding_created (ie
/// called funding_transaction_generated for outbound channels) being closed.
///
/// Note that you can be a bit lazier about writing out ChannelManager than you can be with
/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
/// returning from chain::Watch::watch_/update_channel; with ChannelManagers, writing updates
/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
/// the serialization process). If the deserialized version is out-of-date compared to the
/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
///
/// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which
/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
/// block_connected() to step towards your best block) upon deserialization before using the
/// object!
///
/// Note that ChannelManager is responsible for tracking liveness of its channels and generating
/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfect.
///
/// Rather than using a plain ChannelManager, it is preferable to use either a
/// SimpleArcChannelManager or a SimpleRefChannelManager, for conciseness. See their documentation
/// for more details, but essentially you should default to using a SimpleRefChannelManager, and
/// use a SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such
/// as when you're using lightning-net-tokio.
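///
/// Constructing one, as a minimal sketch (`fee_est`, `chain_monitor`, `tx_broadcaster`, `logger`
/// and `keys_manager` here are assumed to be your implementations of the corresponding traits,
/// and `current_blockchain_height` the current chain tip height):
///
/// ```ignore
/// let config = UserConfig::default();
/// let manager = ChannelManager::new(Network::Bitcoin, fee_est, chain_monitor, tx_broadcaster,
///                                   logger, keys_manager, config, current_blockchain_height);
/// ```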
pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
	where M::Target: chain::Watch<Keys=ChanSigner>,
	      T::Target: BroadcasterInterface,
	      K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
{
	default_configuration: UserConfig,
	genesis_hash: BlockHash,
	fee_estimator: F,
	chain_monitor: M,
	tx_broadcaster: T,

	#[cfg(test)]
	pub(super) latest_block_height: AtomicUsize,
	#[cfg(not(test))]
	latest_block_height: AtomicUsize,
	last_block_hash: Mutex<BlockHash>,
	secp_ctx: Secp256k1<secp256k1::All>,

	#[cfg(any(test, feature = "_test_utils"))]
	pub(super) channel_state: Mutex<ChannelHolder<ChanSigner>>,
	#[cfg(not(any(test, feature = "_test_utils")))]
	channel_state: Mutex<ChannelHolder<ChanSigner>>,
	our_network_key: SecretKey,

	/// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
	/// value increases strictly since we don't assume access to a time source.
	last_node_announcement_serial: AtomicUsize,

	/// The bulk of our storage will eventually be here (channels and message queues and the like).
	/// If we are connected to a peer we always at least have an entry here, even if no channels
	/// are currently open with that peer.
	/// Because adding or removing an entry is rare, we usually take an outer read lock and then
	/// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
	/// new channel.
	per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,

	pending_events: Mutex<Vec<events::Event>>,
	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
	/// Essentially just when we're serializing ourselves out.
	/// Taken first everywhere where we are making changes before any other locks.
	/// When acquiring this lock in read mode, rather than acquiring it directly, call
	/// `PersistenceNotifierGuard::new(..)` and pass the lock to it, to ensure the
	/// PersistenceNotifier the lock contains sends out a notification when the lock is released.
	total_consistency_lock: RwLock<()>,

	persistence_notifier: PersistenceNotifier,

	keys_manager: K,

	logger: L,
}

/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
/// desirable to notify any listeners on `wait_timeout`/`wait` that new updates are available for
/// persistence. Therefore, this struct is responsible for locking the total consistency lock and,
/// upon going out of scope, sending the aforementioned notification (since the lock being released
/// indicates that the updates are ready for persistence).
struct PersistenceNotifierGuard<'a> {
	persistence_notifier: &'a PersistenceNotifier,
	// We hold onto this result so the lock doesn't get released immediately.
	_read_guard: RwLockReadGuard<'a, ()>,
}
impl<'a> PersistenceNotifierGuard<'a> {
	fn new(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier) -> Self {
		let read_guard = lock.read().unwrap();
		Self {
			persistence_notifier: notifier,
			_read_guard: read_guard,
		}
	}
}
impl<'a> Drop for PersistenceNotifierGuard<'a> {
	fn drop(&mut self) {
		self.persistence_notifier.notify();
	}
}

/// The amount of time we require our counterparty to wait to claim their money (ie time between
/// when we, or our watchtower, must check for them having broadcast a theft transaction).
pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
/// The amount of time we're willing to wait to claim money back to us
pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;

/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
/// ie the node we forwarded the payment on to should always have enough room to reliably time out
/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?

// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
// ie that if the next-hop peer fails the HTLC within
// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain,
// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC and
// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
// LATENCY_GRACE_PERIOD_BLOCKS.
#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;

// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
// ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;

/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
#[derive(Clone)]
pub struct ChannelDetails {
	/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
	/// thereafter this is the txid of the funding transaction xor the funding transaction output).
	/// Note that this means this value is *not* persistent - it can change once during the
	/// lifetime of the channel.
	pub channel_id: [u8; 32],
	/// The position of the funding transaction in the chain. None if the funding transaction has
	/// not yet been confirmed and the channel fully opened.
	pub short_channel_id: Option<u64>,
	/// The node_id of our counterparty
	pub remote_network_id: PublicKey,
	/// The Features the channel counterparty provided upon last connection.
	/// Useful for routing as it is the most up-to-date copy of the counterparty's features and
	/// many routing-relevant features are present in the init context.
	pub counterparty_features: InitFeatures,
	/// The value, in satoshis, of this channel as appears in the funding output
	pub channel_value_satoshis: u64,
	/// The user_id passed in to create_channel, or 0 if the channel was inbound.
	pub user_id: u64,
	/// The available outbound capacity for sending HTLCs to the remote peer. This does not include
	/// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
	/// available for inclusion in new outbound HTLCs). This further does not include any pending
	/// outgoing HTLCs which are awaiting some other resolution to be sent.
	pub outbound_capacity_msat: u64,
	/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
	/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
	/// available for inclusion in new inbound HTLCs).
	/// Note that there are some corner cases not fully handled here, so the actual available
	/// inbound capacity may be slightly higher than this.
	pub inbound_capacity_msat: u64,
	/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
	/// the peer is connected, and (c) no monitor update failure is pending resolution.
	pub is_live: bool,
}

/// If a payment fails to send, it can be in one of several states. This enum is returned as the
/// Err() type describing which state the payment is in; see the description of individual enum
/// states for more.
#[derive(Clone, Debug)]
pub enum PaymentSendFailure {
	/// A parameter which was passed to send_payment was invalid, preventing us from attempting to
	/// send the payment at all. No channel state has been changed or messages sent to peers, and
	/// once you've changed the parameter at error, you can freely retry the payment in full.
	ParameterError(APIError),
	/// A parameter in a single path which was passed to send_payment was invalid, preventing us
	/// from attempting to send the payment at all. No channel state has been changed or messages
	/// sent to peers, and once you've changed the parameter at error, you can freely retry the
	/// payment in full.
	///
	/// The results here are ordered the same as the paths in the route object which was passed to
	/// send_payment.
	PathParameterError(Vec<Result<(), APIError>>),
	/// All paths which were attempted failed to send, with no channel state change taking place.
	/// You can freely retry the payment in full (though you probably want to do so over different
	/// paths than the ones selected).
	AllFailedRetrySafe(Vec<APIError>),
	/// Some paths which were attempted failed to send, though possibly not all. At least some
	/// paths have irrevocably committed to the HTLC and retrying the payment in full would result
	/// in over-/re-payment.
	///
	/// The results here are ordered the same as the paths in the route object which was passed to
	/// send_payment, and any Errs which are not APIError::MonitorUpdateFailed can be safely
	/// retried (though there is currently no API with which to do so).
	///
	/// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried
	/// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the
	/// case of Ok(())) or will send once channel_monitor_updated is called on the next-hop channel
	/// with the latest update_id.
	PartialFailure(Vec<Result<(), APIError>>),
}

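// A caller-side sketch of how these variants are meant to be handled (the `route` and
// `payment_hash` bindings and the exact send_payment signature are assumed here; only the first
// three variants are safe to retry in full):
//
//   match channel_manager.send_payment(&route, payment_hash, &None) {
//       Ok(()) => {},
//       Err(PaymentSendFailure::ParameterError(_)) |
//       Err(PaymentSendFailure::PathParameterError(_)) => { /* fix the parameter, retry in full */ },
//       Err(PaymentSendFailure::AllFailedRetrySafe(_)) => { /* retry, ideally over different paths */ },
//       Err(PaymentSendFailure::PartialFailure(results)) => {
//           // Retry only per-path Errs which are not APIError::MonitorUpdateFailed; Ok(())
//           // entries have irrevocably committed to their HTLCs and MUST NOT be retried.
//       },
//   }
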
macro_rules! handle_error {
	($self: ident, $internal: expr, $counterparty_node_id: expr) => {
		match $internal {
			Ok(msg) => Ok(msg),
			Err(MsgHandleErrInternal { err, shutdown_finish }) => {
				#[cfg(debug_assertions)]
				{
					// In testing, ensure there are no deadlocks where the lock is already held upon
					// entering the macro.
					assert!($self.channel_state.try_lock().is_ok());
				}

				let mut msg_events = Vec::with_capacity(2);

				if let Some((shutdown_res, update_option)) = shutdown_finish {
					$self.finish_force_close_channel(shutdown_res);
					if let Some(update) = update_option {
						msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
							msg: update
						});
					}
				}

				log_error!($self.logger, "{}", err.err);
				if let msgs::ErrorAction::IgnoreError = err.action {
				} else {
					msg_events.push(events::MessageSendEvent::HandleError {
						node_id: $counterparty_node_id,
						action: err.action.clone()
					});
				}

				if !msg_events.is_empty() {
					$self.channel_state.lock().unwrap().pending_msg_events.append(&mut msg_events);
				}

				// Return the error in case a higher-level API needs one
				Err(err)
			},
		}
	}
}

macro_rules! break_chan_entry {
	($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
		match $res {
			Ok(res) => res,
			Err(ChannelError::Ignore(msg)) => {
				break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
			},
			Err(ChannelError::Close(msg)) => {
				log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
				let (channel_id, mut chan) = $entry.remove_entry();
				if let Some(short_id) = chan.get_short_channel_id() {
					$channel_state.short_to_id.remove(&short_id);
				}
				break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
			},
			Err(ChannelError::CloseDelayBroadcast(_)) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
		}
	}
}

macro_rules! try_chan_entry {
	($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
		match $res {
			Ok(res) => res,
			Err(ChannelError::Ignore(msg)) => {
				return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
			},
			Err(ChannelError::Close(msg)) => {
				log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
				let (channel_id, mut chan) = $entry.remove_entry();
				if let Some(short_id) = chan.get_short_channel_id() {
					$channel_state.short_to_id.remove(&short_id);
				}
				return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
			},
			Err(ChannelError::CloseDelayBroadcast(msg)) => {
				log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
				let (channel_id, mut chan) = $entry.remove_entry();
				if let Some(short_id) = chan.get_short_channel_id() {
					$channel_state.short_to_id.remove(&short_id);
				}
				let shutdown_res = chan.force_shutdown(false);
				return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
			}
		}
	}
}

macro_rules! handle_monitor_err {
	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
	};
	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
		match $err {
			ChannelMonitorUpdateErr::PermanentFailure => {
				log_error!($self.logger, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
				let (channel_id, mut chan) = $entry.remove_entry();
				if let Some(short_id) = chan.get_short_channel_id() {
					$channel_state.short_to_id.remove(&short_id);
				}
				// TODO: $failed_fails is dropped here, which will cause other channels to hit the
				// chain in a confused state! We need to move them into the ChannelMonitor which
				// will be responsible for failing backwards once things confirm on-chain.
				// It's ok that we drop $failed_forwards here - at this point we'd rather they
				// broadcast HTLC-Timeout and pay the associated fees to get their funds back than
				// us bother trying to claim it just to forward on to another peer. If we're
				// splitting hairs we'd prefer to claim payments that were to us, but we haven't
				// given up the preimage yet, so might as well just wait until the payment is
				// retried, avoiding the on-chain fees.
				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
				res
			},
			ChannelMonitorUpdateErr::TemporaryFailure => {
				log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
						log_bytes!($entry.key()[..]),
						if $resend_commitment && $resend_raa {
							match $action_type {
								RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
								RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
							}
						} else if $resend_commitment { "commitment" }
						else if $resend_raa { "RAA" }
						else { "nothing" },
						(&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
						(&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len());
				if !$resend_commitment {
					debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
				}
				if !$resend_raa {
					debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
				}
				$entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
				Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$entry.key()))
			},
		}
	}
}

macro_rules! return_monitor_err {
	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
		return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
	};
	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
		return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
	}
}

// Does not break in case of TemporaryFailure!
macro_rules! maybe_break_monitor_err {
	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
		match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
			(e, ChannelMonitorUpdateErr::PermanentFailure) => {
				break e;
			},
			(_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
		}
	}
}

impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<ChanSigner, M, T, K, F, L>
	where M::Target: chain::Watch<Keys=ChanSigner>,
	      T::Target: BroadcasterInterface,
	      K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
{
	/// Constructs a new ChannelManager to hold several channels and route between them.
	///
	/// This is the main "logic hub" for all channel-related actions, and implements
	/// ChannelMessageHandler.
	///
	/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
	///
	/// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
	///
	/// Users must provide the current blockchain height from which to track onchain channel
	/// funding outpoints and send payments with reliable timelocks.
	///
	/// Users need to notify the new ChannelManager when a new block is connected or
	/// disconnected using its `block_connected` and `block_disconnected` methods.
	pub fn new(network: Network, fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Self {
		let secp_ctx = Secp256k1::new();

		ChannelManager {
			default_configuration: config.clone(),
			genesis_hash: genesis_block(network).header.block_hash(),
			fee_estimator: fee_est,
			chain_monitor,
			tx_broadcaster,

			latest_block_height: AtomicUsize::new(current_blockchain_height),
			last_block_hash: Mutex::new(Default::default()),
			secp_ctx,

			channel_state: Mutex::new(ChannelHolder {
				by_id: HashMap::new(),
				short_to_id: HashMap::new(),
				forward_htlcs: HashMap::new(),
				claimable_htlcs: HashMap::new(),
				pending_msg_events: Vec::new(),
			}),
			our_network_key: keys_manager.get_node_secret(),

			last_node_announcement_serial: AtomicUsize::new(0),

			per_peer_state: RwLock::new(HashMap::new()),

			pending_events: Mutex::new(Vec::new()),
			total_consistency_lock: RwLock::new(()),
			persistence_notifier: PersistenceNotifier::new(),

			keys_manager,

			logger,
		}
	}

	/// Creates a new outbound channel to the given remote node and with the given value.
	///
	/// user_id will be provided back as user_channel_id in FundingGenerationReady and
	/// FundingBroadcastSafe events to allow tracking of which events correspond with which
	/// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
	/// may wish to avoid using 0 for user_id here.
	///
	/// If successful, will generate a SendOpenChannel message event, so you should probably poll
	/// PeerManager::process_events afterwards.
	///
	/// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
	/// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
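	///
	/// A minimal usage sketch (assuming `manager` is your ChannelManager and `their_network_key`
	/// is the node_id of a connected peer; the values are illustrative):
	///
	/// ```ignore
	/// // Open a 100k-satoshi channel, pushing no initial balance, tagged with user_id 42:
	/// manager.create_channel(their_network_key, 100_000, 0, 42, None)?;
	/// ```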
	pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option<UserConfig>) -> Result<(), APIError> {
		if channel_value_satoshis < 1000 {
			return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
		}

		let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
		let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?;
		let res = channel.get_open_channel(self.genesis_hash.clone());

		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
		// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
		debug_assert!(&self.total_consistency_lock.try_write().is_err());

		let mut channel_state = self.channel_state.lock().unwrap();
		match channel_state.by_id.entry(channel.channel_id()) {
			hash_map::Entry::Occupied(_) => {
				if cfg!(feature = "fuzztarget") {
					return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
				} else {
					panic!("RNG is bad???");
				}
			},
			hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
		}
		channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
			node_id: their_network_key,
			msg: res,
		});
		Ok(())
	}

	fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<ChanSigner>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> {
		let mut res = Vec::new();
		{
			let channel_state = self.channel_state.lock().unwrap();
			res.reserve(channel_state.by_id.len());
			for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
				let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
				res.push(ChannelDetails {
					channel_id: (*channel_id).clone(),
					short_channel_id: channel.get_short_channel_id(),
					remote_network_id: channel.get_counterparty_node_id(),
					counterparty_features: InitFeatures::empty(),
					channel_value_satoshis: channel.get_value_satoshis(),
					inbound_capacity_msat,
					outbound_capacity_msat,
					user_id: channel.get_user_id(),
					is_live: channel.is_live(),
				});
			}
		}
		let per_peer_state = self.per_peer_state.read().unwrap();
		for chan in res.iter_mut() {
			if let Some(peer_state) = per_peer_state.get(&chan.remote_network_id) {
				chan.counterparty_features = peer_state.lock().unwrap().latest_features.clone();
			}
		}
		res
	}

	/// Gets the list of open channels, in random order. See ChannelDetails field documentation for
	/// more information.
	pub fn list_channels(&self) -> Vec<ChannelDetails> {
		self.list_channels_with_filter(|_| true)
	}

	/// Gets the list of usable channels, in random order. Useful as an argument to
	/// get_route to ensure non-announced channels are used.
	///
	/// These are guaranteed to have their is_live value set to true, see the documentation for
	/// ChannelDetails::is_live for more info on exactly what the criteria are.
	pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
		// Note we use is_live here instead of usable which leads to somewhat confused
		// internal/external nomenclature, but that's ok cause that's probably what the user
		// really wanted anyway.
		self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
	}

	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
	/// will be accepted on the given channel, and after additional timeout/the closing of all
	/// pending HTLCs, the channel will be closed on chain.
	///
	/// May generate a SendShutdown message event on success, which should be relayed.
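	///
	/// A minimal sketch (assuming `manager` is your ChannelManager and `channel_id` came from a
	/// prior ChannelDetails):
	///
	/// ```ignore
	/// manager.close_channel(&channel_id)?;
	/// ```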
	pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);

		let (mut failed_htlcs, chan_option) = {
			let mut channel_state_lock = self.channel_state.lock().unwrap();
			let channel_state = &mut *channel_state_lock;
			match channel_state.by_id.entry(channel_id.clone()) {
				hash_map::Entry::Occupied(mut chan_entry) => {
					let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
					channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
						node_id: chan_entry.get().get_counterparty_node_id(),
						msg: shutdown_msg
					});
					if chan_entry.get().is_shutdown() {
						if let Some(short_id) = chan_entry.get().get_short_channel_id() {
							channel_state.short_to_id.remove(&short_id);
						}
						(failed_htlcs, Some(chan_entry.remove_entry().1))
					} else { (failed_htlcs, None) }
				},
				hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() })
			}
		};
		for htlc_source in failed_htlcs.drain(..) {
			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
		}
		let chan_update = if let Some(chan) = chan_option {
			if let Ok(update) = self.get_channel_update(&chan) {
				Some(update)
			} else { None }
		} else { None };

		if let Some(update) = chan_update {
			let mut channel_state = self.channel_state.lock().unwrap();
			channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
				msg: update
			});
		}

		Ok(())
	}

#[ inline ]
2018-11-18 22:01:32 -05:00
fn finish_force_close_channel ( & self , shutdown_res : ShutdownResult ) {
2020-03-18 16:30:05 -04:00
let ( funding_txo_option , monitor_update , mut failed_htlcs ) = shutdown_res ;
2020-03-02 12:55:53 -05:00
log_trace! ( self . logger , " Finishing force-closure of channel {} HTLCs to fail " , failed_htlcs . len ( ) ) ;
2018-09-11 14:20:40 -04:00
for htlc_source in failed_htlcs . drain ( .. ) {
2018-12-17 20:47:19 -05:00
self . fail_htlc_backwards_internal ( self . channel_state . lock ( ) . unwrap ( ) , htlc_source . 0 , & htlc_source . 1 , HTLCFailReason ::Reason { failure_code : 0x4000 | 8 , data : Vec ::new ( ) } ) ;
2018-07-28 19:15:45 -04:00
}
2020-03-18 16:30:05 -04:00
if let Some ( funding_txo ) = funding_txo_option {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
// ignore the result here.
2020-07-20 17:03:52 -07:00
let _ = self . chain_monitor . update_channel ( funding_txo , monitor_update ) ;
2018-07-28 19:15:45 -04:00
}
}
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<(), APIError> {
	let mut chan = {
		let mut channel_state_lock = self.channel_state.lock().unwrap();
		let channel_state = &mut *channel_state_lock;
		if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
			if let Some(node_id) = peer_node_id {
				if chan.get().get_counterparty_node_id() != *node_id {
					// Error or Ok here doesn't matter - the result is only exposed publicly
					// when peer_node_id is None anyway.
					return Ok(());
				}
			}
			if let Some(short_id) = chan.get().get_short_channel_id() {
				channel_state.short_to_id.remove(&short_id);
			}
			chan.remove_entry().1
		} else {
			return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() });
		}
	};
	log_trace!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
	self.finish_force_close_channel(chan.force_shutdown(true));
	if let Ok(update) = self.get_channel_update(&chan) {
		let mut channel_state = self.channel_state.lock().unwrap();
		channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
			msg: update
		});
	}
	Ok(())
}
/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to
/// the manager.
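///
/// A minimal sketch (hypothetical `channel_manager` and `channel_id` values; this closes
/// unilaterally, so prefer close_channel unless the peer is unresponsive):
///
/// ```ignore
/// if let Err(e) = channel_manager.force_close_channel(&channel_id) {
///     // APIError::ChannelUnavailable if the channel_id is unknown.
///     println!("force-close failed: {:?}", e);
/// }
/// ```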
pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
	let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
	self.force_close_channel_with_peer(channel_id, None)
}
/// Force close all channels, immediately broadcasting the latest local commitment transaction
/// for each to the chain and rejecting new HTLCs on each.
pub fn force_close_all_channels(&self) {
	for chan in self.list_channels() {
		let _ = self.force_close_channel(&chan.channel_id);
	}
}
fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder<ChanSigner>>) {
	macro_rules! return_malformed_err {
		($msg: expr, $err_code: expr) => {
			{
				log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
				return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
					channel_id: msg.channel_id,
					htlc_id: msg.htlc_id,
					sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
					failure_code: $err_code,
				})), self.channel_state.lock().unwrap());
			}
		}
	}
	if let Err(_) = msg.onion_routing_packet.public_key {
		return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
	}

	// Derive the per-hop shared secret via ECDH between the packet's ephemeral pubkey and our
	// node key, then expand it into the rho (stream cipher) and mu (HMAC) keys.
	let shared_secret = {
		let mut arr = [0; 32];
		arr.copy_from_slice(&SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
		arr
	};
	let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(&shared_secret);
	if msg.onion_routing_packet.version != 0 {
		//TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
		//sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
		//the hash doesn't really serve any purpose - in the case of hashing all data, the
		//receiving node would have to brute force to figure out which version was put in the
		//packet by the node that sent us the message. In the case of hashing the hop_data, the
		//node knows the HMAC matched, so they already know what is there...
		return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
	}

	let mut hmac = HmacEngine::<Sha256>::new(&mu);
	hmac.input(&msg.onion_routing_packet.hop_data);
	hmac.input(&msg.payment_hash.0[..]);
	if !fixed_time_eq(&Hmac::from_engine(hmac).into_inner(), &msg.onion_routing_packet.hmac) {
		return_malformed_err!("HMAC Check failed", 0x8000 | 0x4000 | 5);
	}
2018-08-21 16:57:41 -04:00
let mut channel_state = None ;
macro_rules ! return_err {
( $msg : expr , $err_code : expr , $data : expr ) = > {
{
2020-03-02 12:55:53 -05:00
log_info! ( self . logger , " Failed to accept/forward incoming HTLC: {} " , $msg ) ;
2018-08-21 16:57:41 -04:00
if channel_state . is_none ( ) {
channel_state = Some ( self . channel_state . lock ( ) . unwrap ( ) ) ;
}
2018-08-26 16:34:47 -04:00
return ( PendingHTLCStatus ::Fail ( HTLCFailureMsg ::Relay ( msgs ::UpdateFailHTLC {
2018-08-21 16:57:41 -04:00
channel_id : msg . channel_id ,
htlc_id : msg . htlc_id ,
2018-12-19 17:02:27 -05:00
reason : onion_utils ::build_first_hop_failure_packet ( & shared_secret , $err_code , $data ) ,
2018-09-11 14:20:40 -04:00
} ) ) , channel_state . unwrap ( ) ) ;
2018-08-21 16:57:41 -04:00
}
}
}
let mut chacha = ChaCha20 ::new ( & rho , & [ 0 u8 ; 8 ] ) ;
2019-12-27 17:38:15 -05:00
let mut chacha_stream = ChaChaReader { chacha : & mut chacha , read : Cursor ::new ( & msg . onion_routing_packet . hop_data [ .. ] ) } ;
2019-12-26 13:43:43 -05:00
let ( next_hop_data , next_hop_hmac ) = {
2019-12-27 17:38:15 -05:00
match msgs ::OnionHopData ::read ( & mut chacha_stream ) {
2018-08-21 16:57:41 -04:00
Err ( err ) = > {
let error_code = match err {
2018-09-19 13:06:35 -04:00
msgs ::DecodeError ::UnknownVersion = > 0x4000 | 1 , // unknown realm byte
2019-12-27 17:44:46 -05:00
msgs ::DecodeError ::UnknownRequiredFeature |
msgs ::DecodeError ::InvalidValue |
msgs ::DecodeError ::ShortRead = > 0x4000 | 22 , // invalid_onion_payload
2018-08-21 16:57:41 -04:00
_ = > 0x2000 | 2 , // Should never happen
} ;
return_err! ( " Unable to decode our hop data " , error_code , & [ 0 ; 0 ] ) ;
} ,
2019-12-27 17:38:15 -05:00
Ok ( msg ) = > {
let mut hmac = [ 0 ; 32 ] ;
if let Err ( _ ) = chacha_stream . read_exact ( & mut hmac [ .. ] ) {
2019-12-27 17:44:46 -05:00
return_err! ( " Unable to decode hop data " , 0x4000 | 22 , & [ 0 ; 0 ] ) ;
2019-12-27 17:38:15 -05:00
}
( msg , hmac )
} ,
2018-08-21 16:57:41 -04:00
}
} ;
	let pending_forward_info = if next_hop_hmac == [0; 32] {
		#[cfg(test)]
		{
			// In tests, make sure that the initial onion packet data is, at least, non-0.
			// We could do some fancy randomness test here, but, ehh, whatever.
			// This checks for the issue where you can calculate the path length given the
			// onion data as all the path entries that the originator sent will be here
			// as-is (and were originally 0s).
			// Of course reverse path calculation is still pretty easy given naive routing
			// algorithms, but this fixes the most-obvious case.
			let mut next_bytes = [0; 32];
			chacha_stream.read_exact(&mut next_bytes).unwrap();
			assert_ne!(next_bytes[..], [0; 32][..]);
			chacha_stream.read_exact(&mut next_bytes).unwrap();
			assert_ne!(next_bytes[..], [0; 32][..]);
		}

		// OUR PAYMENT!
		// final_expiry_too_soon
		// We have to have some headroom to broadcast on chain if we have the preimage, so make
		// sure we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
		// Also, ensure that, in the case of an unknown payment hash, our payment logic has
		// enough time to fail the HTLC backward before our onchain logic triggers a channel
		// closure (see the HTLC_FAIL_BACK_BUFFER rationale).
		if (msg.cltv_expiry as u64) <= self.latest_block_height.load(Ordering::Acquire) as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
			return_err!("The final CLTV expiry is too soon to handle", 17, &[0; 0]);
		}
		// final_incorrect_htlc_amount
		if next_hop_data.amt_to_forward > msg.amount_msat {
			return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
		}
		// final_incorrect_cltv_expiry
		if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
			return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
		}

		let payment_data = match next_hop_data.format {
			msgs::OnionHopDataFormat::Legacy { .. } => None,
			msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0; 0]),
			msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data,
		};

		// Note that we could obviously respond immediately with an update_fulfill_htlc
		// message, however that would leak that we are the recipient of this payment, so
		// instead we stay symmetric with the forwarding case, only responding (after a
		// delay) once they've sent us a commitment_signed!
		PendingHTLCStatus::Forward(PendingHTLCInfo {
			routing: PendingHTLCRouting::Receive {
				payment_data,
				incoming_cltv_expiry: msg.cltv_expiry,
			},
			payment_hash: msg.payment_hash.clone(),
			incoming_shared_secret: shared_secret,
			amt_to_forward: next_hop_data.amt_to_forward,
			outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
		})
	} else {
		let mut new_packet_data = [0; 20*65];
		let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
		#[cfg(debug_assertions)]
		{
			// Check two things:
			// a) that the behavior of our stream here will return Ok(0) even if the TLV
			//    read above emptied out our buffer and the unwrap() won't needlessly panic
			// b) that we didn't somehow magically end up with extra data.
			let mut t = [0; 1];
			debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
		}
		// Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
		// fill the onion hop data we'll forward to our next-hop peer.
		chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);

		let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();

		// Blind the ephemeral pubkey for the next hop: multiply it by
		// SHA256(ephemeral_pubkey || shared_secret), per the Sphinx construction.
		let blinding_factor = {
			let mut sha = Sha256::engine();
			sha.input(&new_pubkey.serialize()[..]);
			sha.input(&shared_secret);
			Sha256::from_engine(sha).into_inner()
		};

		let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
			Err(e)
		} else { Ok(new_pubkey) };

		let outgoing_packet = msgs::OnionPacket {
			version: 0,
			public_key,
			hop_data: new_packet_data,
			hmac: next_hop_hmac.clone(),
		};

		let short_channel_id = match next_hop_data.format {
			msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
			msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
			msgs::OnionHopDataFormat::FinalNode { .. } => {
				return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
			},
		};

		PendingHTLCStatus::Forward(PendingHTLCInfo {
			routing: PendingHTLCRouting::Forward {
				onion_packet: outgoing_packet,
				short_channel_id,
			},
			payment_hash: msg.payment_hash.clone(),
			incoming_shared_secret: shared_secret,
			amt_to_forward: next_hop_data.amt_to_forward,
			outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
		})
	};
	channel_state = Some(self.channel_state.lock().unwrap());
	if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
		// If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
		// with a short_channel_id of 0. This is important as various things later assume
		// short_channel_id is non-0 in any ::Forward.
		if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
			let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
			let forwarding_id = match id_option {
				None => { // unknown_next_peer
					return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0; 0]);
				},
				Some(id) => id.clone(),
			};
			if let Some((err, code, chan_update)) = loop {
				let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();

				// Note that we could technically not return an error yet here and just hope
				// that the connection is reestablished or monitor updated by the time we get
				// around to doing the actual forward, but better to fail early if we can and
				// hopefully an attacker trying to path-trace payments cannot make this occur
				// on a small/per-node/per-channel scale.
				if !chan.is_live() { // channel_disabled
					break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
				}
				if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
					break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
				}
				// Our forwarding fee is base_fee_msat + amt_to_forward * proportional_fee / 1,000,000.
				let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_holder_fee_base_msat(&self.fee_estimator) as u64) });
				if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
					break Some(("Prior hop has deviated from specified fee parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
				}
				if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
					break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
				}
				let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
				// Theoretically, the channel counterparty shouldn't send us an HTLC expiring now,
				// but we want to be robust wrt counterparty packet sanitization (see the
				// HTLC_FAIL_BACK_BUFFER rationale).
				if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
					break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
				}
				if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
					break Some(("CLTV expiry is too far in the future", 21, None));
				}
				// In theory, we would be safe against unintentional channel-closure if we only
				// required a margin of LATENCY_GRACE_PERIOD_BLOCKS, but to be safe against policy
				// reception we use a longer delay.
				if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
					break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
				}

				break None;
			}
			{
				let mut res = Vec::with_capacity(8 + 128);
				if let Some(chan_update) = chan_update {
					if code == 0x1000 | 11 || code == 0x1000 | 12 {
						res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
					}
					else if code == 0x1000 | 13 {
						res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
					}
					else if code == 0x1000 | 20 {
						// TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
						res.extend_from_slice(&byte_utils::be16_to_array(0));
					}
					res.extend_from_slice(&chan_update.encode_with_len()[..]);
				}
				return_err!(err, code, &res[..]);
			}
		}
	}

	(pending_forward_info, channel_state.unwrap())
}
/// Only fails if the channel does not yet have an assigned short_id.
/// May be called with channel_state already locked!
fn get_channel_update(&self, chan: &Channel<ChanSigner>) -> Result<msgs::ChannelUpdate, LightningError> {
	let short_channel_id = match chan.get_short_channel_id() {
		None => return Err(LightningError { err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError }),
		Some(id) => id,
	};

	let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_counterparty_node_id().serialize()[..];

	let unsigned = msgs::UnsignedChannelUpdate {
		chain_hash: self.genesis_hash,
		short_channel_id,
		timestamp: chan.get_update_time_counter(),
		// Low bit: direction (set if we are not node_one); second bit: channel disabled.
		flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
		cltv_expiry_delta: CLTV_EXPIRY_DELTA,
		htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
		htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
		fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator),
		fee_proportional_millionths: chan.get_fee_proportional_millionths(),
		excess_data: Vec::new(),
	};

	let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
	let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);

	Ok(msgs::ChannelUpdate {
		signature: sig,
		contents: unsigned
	})
}
// Only public for testing, this should otherwise never be called directly
pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> {
	log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
	let prng_seed = self.keys_manager.get_secure_random_bytes();
	let session_priv = SecretKey::from_slice(&self.keys_manager.get_secure_random_bytes()[..]).expect("RNG is busted");

	let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
		.map_err(|_| APIError::RouteError { err: "Pubkey along hop was maliciously selected" })?;
	let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?;
	if onion_utils::route_size_insane(&onion_payloads) {
		return Err(APIError::RouteError { err: "Route size too large considering onion data" });
	}
	let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);

	let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);

	let err: Result<(), _> = loop {
		let mut channel_lock = self.channel_state.lock().unwrap();
		let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
			None => return Err(APIError::ChannelUnavailable { err: "No channel available with first hop!".to_owned() }),
			Some(id) => id.clone(),
		};
		let channel_state = &mut *channel_lock;
		if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
			match {
				if chan.get().get_counterparty_node_id() != path.first().unwrap().pubkey {
					return Err(APIError::RouteError { err: "Node ID mismatch on first hop!" });
				}
				if !chan.get().is_live() {
					return Err(APIError::ChannelUnavailable { err: "Peer for first hop currently disconnected/pending monitor update!".to_owned() });
				}
				break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
					path: path.clone(),
					session_priv: session_priv.clone(),
					first_hop_htlc_msat: htlc_msat,
				}, onion_packet, &self.logger), channel_state, chan)
			} {
				Some((update_add, commitment_signed, monitor_update)) => {
					if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
						maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
						// Note that MonitorUpdateFailed here indicates (per function docs)
						// that we will resend the commitment update once monitor updating
						// is restored. Therefore, we must return an error indicating that
						// it is unsafe to retry the payment wholesale, which we do in the
						// send_payment check for MonitorUpdateFailed, below.
						return Err(APIError::MonitorUpdateFailed);
					}
					channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
						node_id: path.first().unwrap().pubkey,
						updates: msgs::CommitmentUpdate {
							update_add_htlcs: vec![update_add],
							update_fulfill_htlcs: Vec::new(),
							update_fail_htlcs: Vec::new(),
							update_fail_malformed_htlcs: Vec::new(),
							update_fee: None,
							commitment_signed,
						},
					});
				},
				None => {},
			}
		} else { unreachable!(); }
		return Ok(());
	};
	match handle_error!(self, err, path.first().unwrap().pubkey) {
		Ok(_) => unreachable!(),
		Err(e) => {
			Err(APIError::ChannelUnavailable { err: e.err })
		},
	}
}
/// Sends a payment along a given route.
///
/// Value parameters are provided via the last hop in route, see documentation for RouteHop
/// fields for more info.
///
/// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
/// payment), we don't do anything to stop you! We always try to ensure that if the provided
/// next hop knows the preimage to payment_hash they can claim an additional amount as
/// specified in the last hop in the route! Thus, you should probably do your own
/// payment_preimage tracking (which you should already be doing as they represent "proof of
/// payment") and prevent double-sends yourself.
///
/// May generate SendHTLCs message(s) event on success, which should be relayed.
///
/// Each path may have a different return value, and a PaymentSendFailure may contain a Vec
/// with each entry matching the corresponding-index entry in the route paths, see
/// PaymentSendFailure for more info.
///
/// In general, a path may raise:
///  * APIError::RouteError when an invalid route or forwarding parameter (cltv_delta, fee,
///    node public key) is specified.
///  * APIError::ChannelUnavailable if the next-hop channel is not available for updates
///    (including due to previous monitor update failure or new permanent monitor update
///    failure).
///  * APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
///    relevant updates.
///
/// Note that depending on the type of the PaymentSendFailure the HTLC may have been
/// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
/// different route unless you intend to pay twice!
///
/// payment_secret is unrelated to payment_hash (or PaymentPreimage) and exists to authenticate
/// the sender to the recipient and prevent payment-probing (deanonymization) attacks. For
/// newer nodes, it will be provided to you in the invoice. If you do not have one, the Route
/// must not contain multiple paths as multi-path payments require a recipient-provided
/// payment_secret.
/// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
/// bit set (either as required or as available). If multiple paths are present in the Route,
/// we assume the invoice had the basic_mpp feature set.
pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), PaymentSendFailure> {
	if route.paths.len() < 1 {
		return Err(PaymentSendFailure::ParameterError(APIError::RouteError { err: "There must be at least one path to send over" }));
	}
	if route.paths.len() > 10 {
		// This limit is completely arbitrary - there aren't any real fundamental path-count
		// limits. After we support retrying individual paths we should likely bump this, but
		// for now more than 10 paths likely carries too much one-path failure.
		return Err(PaymentSendFailure::ParameterError(APIError::RouteError { err: "Sending over more than 10 paths is not currently supported" }));
	}
	let mut total_value = 0;
	let our_node_id = self.get_our_node_id();
	let mut path_errs = Vec::with_capacity(route.paths.len());
	'path_check: for path in route.paths.iter() {
		if path.len() < 1 || path.len() > 20 {
			path_errs.push(Err(APIError::RouteError { err: "Path didn't go anywhere/had bogus size" }));
			continue 'path_check;
		}
		for (idx, hop) in path.iter().enumerate() {
			if idx != path.len() - 1 && hop.pubkey == our_node_id {
				path_errs.push(Err(APIError::RouteError { err: "Path went through us but wasn't a simple rebalance loop to us" }));
				continue 'path_check;
			}
		}
		total_value += path.last().unwrap().fee_msat;
		path_errs.push(Ok(()));
	}
	if path_errs.iter().any(|e| e.is_err()) {
		return Err(PaymentSendFailure::PathParameterError(path_errs));
	}

	let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
	let mut results = Vec::new();
	for path in route.paths.iter() {
		results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height));
	}
	let mut has_ok = false;
	let mut has_err = false;
	for res in results.iter() {
		if res.is_ok() { has_ok = true; }
		if res.is_err() { has_err = true; }
		if let &Err(APIError::MonitorUpdateFailed) = res {
			// MonitorUpdateFailed is inherently unsafe to retry, so we call it a
			// PartialFailure.
			has_err = true;
			has_ok = true;
			break;
		}
	}
	if has_err && has_ok {
		Err(PaymentSendFailure::PartialFailure(results))
	} else if has_err {
		Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
	} else {
		Ok(())
	}
}
/// Call this upon creation of a funding transaction for the given channel.
///
/// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs
/// or your counterparty can steal your funds!
///
/// Panics if a funding transaction has already been provided for this channel.
///
/// May panic if the funding_txo is duplicative with some other channel (note that this should
/// be trivially prevented by using unique funding transaction keys per-channel).
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
	let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);

	let (chan, msg) = {
		let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
			Some(mut chan) => {
				(chan.get_outbound_funding_created(funding_txo, &self.logger)
					.map_err(|e| if let ChannelError::Close(msg) = e {
						MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None)
					} else { unreachable!(); })
				, chan)
			},
			None => return
		};
		match handle_error!(self, res, chan.get_counterparty_node_id()) {
			Ok(funding_msg) => {
				(chan, funding_msg)
			},
			Err(_) => { return; }
		}
	};

	let mut channel_state = self.channel_state.lock().unwrap();
	channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
		node_id: chan.get_counterparty_node_id(),
		msg,
	});
	match channel_state.by_id.entry(chan.channel_id()) {
		hash_map::Entry::Occupied(_) => {
			panic!("Generated duplicate funding txid?");
		},
		hash_map::Entry::Vacant(e) => {
			e.insert(chan);
		}
	}
}
fn get_announcement_sigs(&self, chan: &Channel<ChanSigner>) -> Option<msgs::AnnouncementSignatures> {
	if !chan.should_announce() {
		log_trace!(self.logger, "Can't send announcement_signatures for private channel {}", log_bytes!(chan.channel_id()));
		return None
	}

	let (announcement, our_bitcoin_sig) = match chan.get_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone()) {
		Ok(res) => res,
		Err(_) => return None, // Only in case of state precondition violations eg channel is closing
	};
	let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
	let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);

	Some(msgs::AnnouncementSignatures {
		channel_id: chan.channel_id(),
		short_channel_id: chan.get_short_channel_id().unwrap(),
		node_signature: our_node_sig,
		bitcoin_signature: our_bitcoin_sig,
	})
}
#[allow(dead_code)]
// Messages of up to 64KB should never end up more than half full with addresses, as that would
// be absurd. We ensure this by checking that at least 500 (our stated public contract on when
// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
// message...
const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
#[deny(const_err)]
#[allow(dead_code)]
// ...by failing to compile if the number of addresses that would be half of a message is
// smaller than 500 (the u32 subtraction below underflows at compile time in that case):
const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500;
/// Generates a signed node_announcement from the given arguments and creates a
/// BroadcastNodeAnnouncement event. Note that such messages will be ignored unless peers have
/// seen a channel_announcement from us (ie unless we have public channels open).
///
/// RGB is a node "color" and alias is a printable human-readable string to describe this node
/// to humans. They carry no in-protocol meaning.
///
/// addresses represent the set (possibly empty) of socket addresses on which this node accepts
/// incoming connections. These will be broadcast to the network, publicly tying these
/// addresses together. If you wish to preserve user privacy, addresses should likely contain
/// only Tor Onion addresses.
///
/// Panics if addresses is absurdly large (more than 500).
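///
/// A minimal sketch (hypothetical values; an empty address list is fine for a node that does
/// not accept incoming connections):
///
/// ```ignore
/// channel_manager.broadcast_node_announcement(
///     [0x00, 0xff, 0x00], // RGB color
///     [0u8; 32],          // 32-byte alias, zero-padded
///     Vec::new(),         // no publicly-reachable addresses
/// );
/// ```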
pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<NetAddress>) {
	let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);

	if addresses.len() > 500 {
		panic!("More than half the message size was taken up by public addresses!");
	}

	let announcement = msgs::UnsignedNodeAnnouncement {
		features: NodeFeatures::known(),
		timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32,
		node_id: self.get_our_node_id(),
		rgb, alias, addresses,
		excess_address_data: Vec::new(),
		excess_data: Vec::new(),
	};
	let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);

	let mut channel_state = self.channel_state.lock().unwrap();
	channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
		msg: msgs::NodeAnnouncement {
			signature: self.secp_ctx.sign(&msghash, &self.our_network_key),
			contents: announcement
		},
	});
}
/// Processes HTLCs which are pending waiting on random forward delay.
///
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
/// Will likely generate further events.
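///
/// A minimal sketch of the expected event-driven call pattern (hypothetical event loop; the
/// delay comes from the PendingHTLCsForwardable event's time_forwardable field):
///
/// ```ignore
/// for event in channel_manager.get_and_clear_pending_events() {
///     if let Event::PendingHTLCsForwardable { time_forwardable } = event {
///         // Sleep for roughly time_forwardable (plus jitter), then:
///         channel_manager.process_pending_htlc_forwards();
///     }
/// }
/// ```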
pub fn process_pending_htlc_forwards(&self) {
	let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);

	let mut new_events = Vec::new();
	let mut failed_forwards = Vec::new();
	let mut handle_errors = Vec::new();
	{
		let mut channel_state_lock = self.channel_state.lock().unwrap();
		let channel_state = &mut *channel_state_lock;

		for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
			if short_chan_id != 0 {
				let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
					Some(chan_id) => chan_id.clone(),
					None => {
						failed_forwards.reserve(pending_forwards.len());
						for forward_info in pending_forwards.drain(..) {
							match forward_info {
								HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info,
										prev_funding_outpoint } => {
									let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
										short_channel_id: prev_short_channel_id,
										outpoint: prev_funding_outpoint,
										htlc_id: prev_htlc_id,
										incoming_packet_shared_secret: forward_info.incoming_shared_secret,
									});
									failed_forwards.push((htlc_source, forward_info.payment_hash,
										HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() }
									));
								},
								HTLCForwardInfo::FailHTLC { .. } => {
									// Channel went away before we could fail it. This implies
									// the channel is now on chain and our counterparty is
									// trying to broadcast the HTLC-Timeout, but that's their
									// problem, not ours.
								}
							}
						}
						continue;
					}
				};
				if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
					let mut add_htlc_msgs = Vec::new();
					let mut fail_htlc_msgs = Vec::new();
					for forward_info in pending_forwards.drain(..) {
						match forward_info {
							HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
									routing: PendingHTLCRouting::Forward {
										onion_packet, ..
									}, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
									prev_funding_outpoint } => {
								log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id);
								let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
									short_channel_id: prev_short_channel_id,
									outpoint: prev_funding_outpoint,
									htlc_id: prev_htlc_id,
									incoming_packet_shared_secret: incoming_shared_secret,
								});
								match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet) {
									Err(e) => {
										if let ChannelError::Ignore(msg) = e {
											log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
										} else {
											panic!("Stated return value requirements in send_htlc() were not met");
										}
										let chan_update = self.get_channel_update(chan.get()).unwrap();
										failed_forwards.push((htlc_source, payment_hash,
											HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() }
										));
										continue;
									},
									Ok(update_add) => {
										match update_add {
											Some(msg) => { add_htlc_msgs.push(msg); },
											None => {
												// Nothing to do here...we're waiting on a remote
												// revoke_and_ack before we can add anymore HTLCs. The Channel
												// will automatically handle building the update_add_htlc and
												// commitment_signed messages when we can.
												// TODO: Do some kind of timer to set the channel as !is_live()
												// as we don't really want others relying on us relaying through
												// this channel currently :/.
											}
										}
									}
								}
							},
							HTLCForwardInfo::AddHTLC { .. } => {
								panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
							},
							HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
								log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
								match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
									Err(e) => {
										if let ChannelError::Ignore(msg) = e {
											log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
										} else {
											panic!("Stated return value requirements in get_update_fail_htlc() were not met");
										}
										// fail-backs are best-effort, we probably already have one
										// pending, and if not that's OK, if not, the channel is on
										// the chain and sending the HTLC-Timeout is their problem.
										continue;
									},
									Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
									Ok(None) => {
										// Nothing to do here...we're waiting on a remote
										// revoke_and_ack before we can update the commitment
										// transaction. The Channel will automatically handle
										// building the update_fail_htlc and commitment_signed
										// messages when we can.
										// We don't need any kind of timer here as they should fail
										// the channel onto the chain if they can't get our
										// update_fail_htlc in time, it's not our problem.
									}
								}
							},
						}
					}

					if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
						let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
							Ok(res) => res,
							Err(e) => {
								// We surely failed send_commitment due to bad keys, in that case
								// close channel and then send error message to peer.
								let counterparty_node_id = chan.get().get_counterparty_node_id();
								let err: Result<(), _> = match e {
									ChannelError::Ignore(_) => {
										panic!("Stated return value requirements in send_commitment() were not met");
									},
									ChannelError::Close(msg) => {
										log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
										let (channel_id, mut channel) = chan.remove_entry();
										if let Some(short_id) = channel.get_short_channel_id() {
											channel_state.short_to_id.remove(&short_id);
										}
										Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok()))
									},
									ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
								};
								handle_errors.push((counterparty_node_id, err));
								continue;
							}
						};
						if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
							handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
							continue;
						}
						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
							node_id: chan.get().get_counterparty_node_id(),
							updates: msgs::CommitmentUpdate {
								update_add_htlcs: add_htlc_msgs,
								update_fulfill_htlcs: Vec::new(),
								update_fail_htlcs: fail_htlc_msgs,
								update_fail_malformed_htlcs: Vec::new(),
								update_fee: None,
								commitment_signed: commitment_msg,
							},
						});
					}
				} else {
					unreachable!();
				}
			} else {
				for forward_info in pending_forwards.drain(..) {
					match forward_info {
						HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
								routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry },
								incoming_shared_secret, payment_hash, amt_to_forward, .. },
								prev_funding_outpoint } => {
							let prev_hop = HTLCPreviousHopData {
								short_channel_id: prev_short_channel_id,
								outpoint: prev_funding_outpoint,
								htlc_id: prev_htlc_id,
								incoming_packet_shared_secret: incoming_shared_secret,
							};

							let mut total_value = 0;
							let payment_secret_opt =
								if let &Some(ref data) = &payment_data { Some(data.payment_secret.clone()) } else { None };
							let htlcs = channel_state.claimable_htlcs.entry((payment_hash, payment_secret_opt))
								.or_insert(Vec::new());
							htlcs.push(ClaimableHTLC {
								prev_hop,
								value: amt_to_forward,
								payment_data: payment_data.clone(),
								cltv_expiry: incoming_cltv_expiry,
							});
							if let &Some(ref data) = &payment_data {
								// For MPP, sum the parts received so far; if any part disagrees
								// on total_msat or the parts overshoot it, fail them all back.
								for htlc in htlcs.iter() {
									total_value += htlc.value;
									if htlc.payment_data.as_ref().unwrap().total_msat != data.total_msat {
										total_value = msgs::MAX_VALUE_MSAT;
									}
									if total_value >= msgs::MAX_VALUE_MSAT { break; }
								}
								if total_value >= msgs::MAX_VALUE_MSAT || total_value > data.total_msat {
									for htlc in htlcs.iter() {
										let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
										htlc_msat_height_data.extend_from_slice(
											&byte_utils::be32_to_array(
												self.latest_block_height.load(Ordering::Acquire)
													as u32,
											),
										);
										failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
												short_channel_id: htlc.prev_hop.short_channel_id,
												outpoint: prev_funding_outpoint,
												htlc_id: htlc.prev_hop.htlc_id,
												incoming_packet_shared_secret: htlc.prev_hop.incoming_packet_shared_secret,
											}), payment_hash,
											HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
										));
									}
								} else if total_value == data.total_msat {
									new_events.push(events::Event::PaymentReceived {
										payment_hash,
										payment_secret: Some(data.payment_secret),
										amt: total_value,
									});
								}
							} else {
								new_events.push(events::Event::PaymentReceived {
									payment_hash,
									payment_secret: None,
									amt: amt_to_forward,
								});
							}
						},
						HTLCForwardInfo::AddHTLC { .. } => {
							panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
						},
						HTLCForwardInfo::FailHTLC { .. } => {
							panic!("Got pending fail of our own HTLC");
						}
					}
				}
			}
		}
	}

	for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) {
		self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason);
	}
	for (counterparty_node_id, err) in handle_errors.drain(..) {
		let _ = handle_error!(self, err, counterparty_node_id);
	}

	if new_events.is_empty() { return }
	let mut events = self.pending_events.lock().unwrap();
	events.append(&mut new_events);
}
/// If a peer is disconnected we mark any channels with that peer as 'disabled'.
/// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
/// to inform the network about the uselessness of these channels.
///
/// This method handles all the details, and must be called roughly once per minute.
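///
/// A minimal sketch of how a background thread might drive this (hypothetical loop; any
/// roughly once-per-minute timer works):
///
/// ```ignore
/// loop {
///     std::thread::sleep(std::time::Duration::from_secs(60));
///     channel_manager.timer_chan_freshness_every_min();
/// }
/// ```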
pub fn timer_chan_freshness_every_min(&self) {
	let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
	let mut channel_state_lock = self.channel_state.lock().unwrap();
	let channel_state = &mut *channel_state_lock;
	for (_, chan) in channel_state.by_id.iter_mut() {
		if chan.is_disabled_staged() && !chan.is_live() {
			// Still disconnected after a full staging cycle - broadcast the disabling update.
			if let Ok(update) = self.get_channel_update(&chan) {
				channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
					msg: update
				});
			}
			chan.to_fresh();
		} else if chan.is_disabled_staged() && chan.is_live() {
			chan.to_fresh();
		} else if chan.is_disabled_marked() {
			chan.to_disabled_staged();
		}
	}
}
/// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
/// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources
/// along the path (including in our own channel on which we received it).
///
/// Returns false if no payment was found to fail backwards, true if the process of failing the
/// HTLC backwards has been started.
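///
/// A minimal sketch (hypothetical values, in response to a PaymentReceived event whose amount
/// we cannot accept):
///
/// ```ignore
/// if !channel_manager.fail_htlc_backwards(&payment_hash, &payment_secret) {
///     // Nothing was pending for this (payment_hash, payment_secret) pair.
/// }
/// ```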
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>) -> bool {
	let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);

	let mut channel_state = Some(self.channel_state.lock().unwrap());
	let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&(*payment_hash, *payment_secret));
	if let Some(mut sources) = removed_source {
		for htlc in sources.drain(..) {
			if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
			let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
			htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
				self.latest_block_height.load(Ordering::Acquire) as u32,
			));
			self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
					HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
					HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
		}
		true
	} else { false }
}
// Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
// failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
// be surfaced to the user.
fn fail_holding_cell_htlcs(&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32]) {
	for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
		match htlc_src {
			HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => {
				let (failure_code, onion_failure_data) =
					match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
						hash_map::Entry::Occupied(chan_entry) => {
							if let Ok(upd) = self.get_channel_update(&chan_entry.get()) {
								(0x1000 | 7, upd.encode_with_len())
							} else {
								(0x4000 | 10, Vec::new())
							}
						},
						hash_map::Entry::Vacant(_) => (0x4000 | 10, Vec::new())
					};
				let channel_state = self.channel_state.lock().unwrap();
				self.fail_htlc_backwards_internal(channel_state,
					htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data });
			},
			HTLCSource::OutboundRoute { .. } => {
				self.pending_events.lock().unwrap().push(
					events::Event::PaymentFailed {
						payment_hash,
						rejected_by_dest: false,
						#[cfg(test)]
						error_code: None,
						#[cfg(test)]
						error_data: None,
					}
				)
			},
		};
	}
}
2018-07-28 18:32:43 -04:00
	/// Fails an HTLC backwards to the node from which we received it.
	/// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
	/// There are several callsites that do things like loop over a list of payment_hashes to
	/// fail and take the channel_state lock for each iteration (as we take ownership and may
	/// drop it). In other words, no assumptions are made that entries in claimable_htlcs point
	/// to still-available channels.
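	///
	/// For HTLCs we originated (HTLCSource::OutboundRoute) this surfaces a PaymentFailed event
	/// to the user; for HTLCs we were forwarding (HTLCSource::PreviousHopData) it queues an
	/// encrypted failure packet to be relayed back over the channel the HTLC arrived on.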
	fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<ChanSigner>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
		//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
		//identify whether we sent it or not based on the (I presume) very different runtime
		//between the branches here. We should make this async and move it into the forward HTLCs
		//timer handling.
		match source {
			HTLCSource::OutboundRoute { ref path, .. } => {
				log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
				mem::drop(channel_state_lock);
				match &onion_error {
					&HTLCFailReason::LightningError { ref err } => {
						#[cfg(test)]
						let (channel_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
						#[cfg(not(test))]
						let (channel_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
						// TODO: If we decided to blame ourselves (or one of our channels) in
						// process_onion_failure we should close that channel as it implies our
						// next-hop is needlessly blaming us!
						if let Some(update) = channel_update {
							self.channel_state.lock().unwrap().pending_msg_events.push(
								events::MessageSendEvent::PaymentFailureNetworkUpdate {
									update,
								}
							);
						}
						self.pending_events.lock().unwrap().push(
							events::Event::PaymentFailed {
								payment_hash: payment_hash.clone(),
								rejected_by_dest: !payment_retryable,
								#[cfg(test)]
								error_code: onion_error_code,
								#[cfg(test)]
								error_data: onion_error_data
							}
						);
					},
					&HTLCFailReason::Reason {
							#[cfg(test)]
							ref failure_code,
							#[cfg(test)]
							ref data,
							.. } => {
						// we get a fail_malformed_htlc from the first hop
						// TODO: We'd like to generate a PaymentFailureNetworkUpdate for temporary
						// failures here, but that would be insufficient as get_route
						// generally ignores its view of our own channels as we provide them via
						// ChannelDetails.
						// TODO: For non-temporary failures, we really should be closing the
						// channel here as we apparently can't relay through them anyway.
						self.pending_events.lock().unwrap().push(
							events::Event::PaymentFailed {
								payment_hash: payment_hash.clone(),
								rejected_by_dest: path.len() == 1,
								#[cfg(test)]
								error_code: Some(*failure_code),
								#[cfg(test)]
								error_data: Some(data.clone()),
							}
						);
					}
				}
			},
			HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, .. }) => {
				let err_packet = match onion_error {
					HTLCFailReason::Reason { failure_code, data } => {
						log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
						let packet = onion_utils::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
						onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
					},
					HTLCFailReason::LightningError { err } => {
						log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0));
						onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
					}
				};

				let mut forward_event = None;
				if channel_state_lock.forward_htlcs.is_empty() {
					forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
				}
				match channel_state_lock.forward_htlcs.entry(short_channel_id) {
					hash_map::Entry::Occupied(mut entry) => {
						entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
					},
					hash_map::Entry::Vacant(entry) => {
						entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
					}
				}
				mem::drop(channel_state_lock);
				if let Some(time) = forward_event {
					let mut pending_events = self.pending_events.lock().unwrap();
					pending_events.push(events::Event::PendingHTLCsForwardable {
						time_forwardable: time
					});
				}
			},
		}
	}

	/// Provides a payment preimage in response to a PaymentReceived event, returning true and
	/// generating message events for the net layer to claim the payment, if possible. Thus, you
	/// should probably kick the net layer to go send messages if this returns true!
	///
	/// You must specify the expected amounts for this HTLC, and we will only claim HTLCs
	/// available within a few percent of the expected amount. This is critical for several
	/// reasons: a) it avoids providing senders with proof-of-payment (in the form of the
	/// payment_preimage) without them having provided the full value, and b) it avoids certain
	/// privacy-breaking recipient-probing attacks which may reveal payment activity to
	/// motivated attackers.
	///
	/// Note that the privacy concerns in (b) are not relevant in payments with a payment_secret
	/// set. Thus, for such payments we will claim any payments which do not under-pay.
	///
	/// May panic if called except in response to a PaymentReceived event.
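	///
	/// # Example
	///
	/// A minimal sketch, assuming `payment_preimage`, `payment_secret` and `amt` were taken
	/// from the corresponding PaymentReceived event and `peer_manager` is the application's
	/// PeerManager:
	///
	/// ```ignore
	/// if channel_manager.claim_funds(payment_preimage, &payment_secret, amt) {
	///     // Message events were generated; kick the net layer so the
	///     // update_fulfill_htlc/commitment_signed messages actually go out.
	///     peer_manager.process_events();
	/// }
	/// ```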
	pub fn claim_funds(&self, payment_preimage: PaymentPreimage, payment_secret: &Option<PaymentSecret>, expected_amount: u64) -> bool {
		let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());

		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);

		let mut channel_state = Some(self.channel_state.lock().unwrap());
		let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&(payment_hash, *payment_secret));
		if let Some(mut sources) = removed_source {
			assert!(!sources.is_empty());

			// If we are claiming an MPP payment, we have to take special care to ensure that each
			// channel exists before claiming all of the payments (inside one lock).
			// Note that channel existence is sufficient as we should always get a monitor update
			// which will take care of the real HTLC claim enforcement.
			//
			// If we find an HTLC which we would need to claim but for which we do not have a
			// channel, we will fail all parts of the MPP payment. While we could wait and see if
			// the sender retries the already-failed path(s), it should be a pretty rare case where
			// we got all the HTLCs and then a channel closed while we were waiting for the user to
			// provide the preimage, so worrying too much about the optimal handling isn't worth
			// it.
			let (is_mpp, mut valid_mpp) = if let &Some(ref data) = &sources[0].payment_data {
				assert!(payment_secret.is_some());
				(true, data.total_msat >= expected_amount)
			} else {
				assert!(payment_secret.is_none());
				(false, false)
			};

			for htlc in sources.iter() {
				if !is_mpp || !valid_mpp { break; }
				if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) {
					valid_mpp = false;
				}
			}

			let mut errs = Vec::new();
			let mut claimed_any_htlcs = false;
			for htlc in sources.drain(..) {
				if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
				if (is_mpp && !valid_mpp) || (!is_mpp && (htlc.value < expected_amount || htlc.value > expected_amount * 2)) {
					let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
					htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
						self.latest_block_height.load(Ordering::Acquire) as u32,
					));
					self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
							HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
							HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
				} else {
					match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
						Err(Some(e)) => {
							if let msgs::ErrorAction::IgnoreError = e.1.err.action {
								// We got a temporary failure updating monitor, but will claim the
								// HTLC when the monitor updating is restored (or on chain).
								log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err);
								claimed_any_htlcs = true;
							} else { errs.push(e); }
						},
						Err(None) if is_mpp => unreachable!("We already checked for channel existence, we can't fail here!"),
						Err(None) => {
							log_warn!(self.logger, "Channel we expected to claim an HTLC from was closed.");
						},
						Ok(()) => claimed_any_htlcs = true,
					}
				}
			}

			// Now that we've done the entire above loop in one lock, we can handle any errors
			// which were generated.
			channel_state.take();

			for (counterparty_node_id, err) in errs.drain(..) {
				let res: Result<(), _> = Err(err);
				let _ = handle_error!(self, res, counterparty_node_id);
			}

			claimed_any_htlcs
		} else { false }
	}

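	// Claims a single HTLC from the channel it arrived on. Returns Ok(()) on success,
	// Err(None) if the channel is already gone (callers decide how to get the preimage
	// into the ChannelMonitor in that case), and Err(Some((node_id, err))) for failures
	// which must be surfaced via handle_error! once all locks have been dropped.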
	fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<ChanSigner>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
		let channel_state = &mut **channel_state_lock;
		let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
			Some(chan_id) => chan_id.clone(),
			None => {
				return Err(None)
			}
		};

		if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
			let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
			match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
				Ok((msgs, monitor_option)) => {
					if let Some(monitor_update) = monitor_option {
						if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
							if was_frozen_for_monitor {
								assert!(msgs.is_none());
							} else {
								return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
							}
						}
					}
					if let Some((msg, commitment_signed)) = msgs {
						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
							node_id: chan.get().get_counterparty_node_id(),
							updates: msgs::CommitmentUpdate {
								update_add_htlcs: Vec::new(),
								update_fulfill_htlcs: vec![msg],
								update_fail_htlcs: Vec::new(),
								update_fail_malformed_htlcs: Vec::new(),
								update_fee: None,
								commitment_signed,
							}
						});
					}
					return Ok(())
				},
				Err(e) => {
					// TODO: Do something with e?
					// This should only occur if we are claiming an HTLC at the same time as the
					// HTLC is being failed (eg because a block is being connected and this caused
					// an HTLC to time out). This should, of course, only occur if the user is the
					// one doing the claiming (as it being a part of a peer claim would imply we're
					// about to lose funds) and only if the lock in claim_funds was dropped as a
					// previous HTLC was failed (thus not for an MPP payment).
					debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e);
					return Err(None)
				},
			}
		} else { unreachable!(); }
	}

	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<ChanSigner>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
		match source {
			HTLCSource::OutboundRoute { .. } => {
				mem::drop(channel_state_lock);
				let mut pending_events = self.pending_events.lock().unwrap();
				pending_events.push(events::Event::PaymentSent {
					payment_preimage
				});
			},
			HTLCSource::PreviousHopData(hop_data) => {
				let prev_outpoint = hop_data.outpoint;
				if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) {
					Ok(()) => Ok(()),
					Err(None) => {
						let preimage_update = ChannelMonitorUpdate {
							update_id: CLOSED_CHANNEL_UPDATE_ID,
							updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
								payment_preimage: payment_preimage.clone(),
							}],
						};
						// We update the ChannelMonitor on the backward link, after
						// receiving an offchain preimage event from the forward link (the
						// event being update_fulfill_htlc).
						if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
							log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
							           payment_preimage, e);
						}
						Ok(())
					},
					Err(Some(res)) => Err(res),
				} {
					mem::drop(channel_state_lock);
					let res: Result<(), _> = Err(err);
					let _ = handle_error!(self, res, counterparty_node_id);
				}
			},
		}
	}

	/// Gets the node_id held by this ChannelManager
	pub fn get_our_node_id(&self) -> PublicKey {
		PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
	}

	/// Restores a single, given channel to normal operation after a
	/// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
	/// operation.
	///
	/// All ChannelMonitor updates up to and including highest_applied_update_id must have been
	/// fully committed in every copy of the given channels' ChannelMonitors.
	///
	/// Note that calling with a highest_applied_update_id other than the current latest
	/// ChannelMonitorUpdate has no effect, and a single call to this function after multiple
	/// ChannelMonitorUpdateErr::TemporaryFailures is fine. The highest_applied_update_id field
	/// exists largely only to prevent races between this and concurrent update_monitor calls.
	///
	/// Thus, the anticipated use is, at a high level:
	///  1) You register a chain::Watch with this ChannelManager,
	///  2) it stores each update to disk, and begins updating any remote (eg watchtower) copies
	///     of said ChannelMonitors as it can, returning ChannelMonitorUpdateErr::TemporaryFailure
	///     any time it cannot do so instantly,
	///  3) update(s) are applied to each remote copy of a ChannelMonitor,
	///  4) once all remote copies are updated, you call this function with the update_id that
	///     completed, and once it is the latest the Channel will be re-enabled.
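	///
	/// A minimal sketch of step 4, assuming our chain::Watch implementation previously
	/// returned TemporaryFailure for `funding_txo` and the out-of-band replication of
	/// update `update_id` has now completed:
	///
	/// ```ignore
	/// channel_manager.channel_monitor_updated(&funding_txo, update_id);
	/// ```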
	pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);

		let mut close_results = Vec::new();
		let mut htlc_forwards = Vec::new();
		let mut htlc_failures = Vec::new();
		let mut pending_events = Vec::new();
		{
			let mut channel_lock = self.channel_state.lock().unwrap();
			let channel_state = &mut *channel_lock;
			let short_to_id = &mut channel_state.short_to_id;
			let pending_msg_events = &mut channel_state.pending_msg_events;
			let channel = match channel_state.by_id.get_mut(&funding_txo.to_channel_id()) {
				Some(chan) => chan,
				None => return,
			};
			if !channel.is_awaiting_monitor_update() || channel.get_latest_monitor_update_id() != highest_applied_update_id {
				return;
			}

			let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored(&self.logger);
			if !pending_forwards.is_empty() {
				htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), funding_txo.clone(), pending_forwards));
			}
			htlc_failures.append(&mut pending_failures);

			macro_rules! handle_cs { () => {
				if let Some(update) = commitment_update {
					pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
						node_id: channel.get_counterparty_node_id(),
						updates: update,
					});
				}
			} }
			macro_rules! handle_raa { () => {
				if let Some(revoke_and_ack) = raa {
					pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
						node_id: channel.get_counterparty_node_id(),
						msg: revoke_and_ack,
					});
				}
			} }
			match order {
				RAACommitmentOrder::CommitmentFirst => {
					handle_cs!();
					handle_raa!();
				},
				RAACommitmentOrder::RevokeAndACKFirst => {
					handle_raa!();
					handle_cs!();
				},
			}
			if needs_broadcast_safe {
				pending_events.push(events::Event::FundingBroadcastSafe {
					funding_txo: channel.get_funding_txo().unwrap(),
					user_channel_id: channel.get_user_id(),
				});
			}
			if let Some(msg) = funding_locked {
				pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
					node_id: channel.get_counterparty_node_id(),
					msg,
				});
				if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
					pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
						node_id: channel.get_counterparty_node_id(),
						msg: announcement_sigs,
					});
				}
				short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
			}
		}

		self.pending_events.lock().unwrap().append(&mut pending_events);

		for failure in htlc_failures.drain(..) {
			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
		}
		self.forward_htlcs(&mut htlc_forwards[..]);

		for res in close_results.drain(..) {
			self.finish_force_close_channel(res);
		}
	}

	fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
		if msg.chain_hash != self.genesis_hash {
			return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
		}

		let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), their_features, msg, 0, &self.default_configuration)
			.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
		let mut channel_state_lock = self.channel_state.lock().unwrap();
		let channel_state = &mut *channel_state_lock;
		match channel_state.by_id.entry(channel.channel_id()) {
			hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!".to_owned(), msg.temporary_channel_id.clone())),
			hash_map::Entry::Vacant(entry) => {
				channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
					node_id: counterparty_node_id.clone(),
					msg: channel.get_accept_channel(),
				});
				entry.insert(channel);
			}
		}
		Ok(())
	}

	fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
		let (value, output_script, user_id) = {
			let mut channel_lock = self.channel_state.lock().unwrap();
			let channel_state = &mut *channel_lock;
			match channel_state.by_id.entry(msg.temporary_channel_id) {
				hash_map::Entry::Occupied(mut chan) => {
					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
					}
					try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_features), channel_state, chan);
					(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
				},
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
			}
		};
		let mut pending_events = self.pending_events.lock().unwrap();
		pending_events.push(events::Event::FundingGenerationReady {
			temporary_channel_id: msg.temporary_channel_id,
			channel_value_satoshis: value,
			output_script,
			user_channel_id: user_id,
		});
		Ok(())
	}

	fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
		let ((funding_msg, monitor), mut chan) = {
			let mut channel_lock = self.channel_state.lock().unwrap();
			let channel_state = &mut *channel_lock;
			match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
				hash_map::Entry::Occupied(mut chan) => {
					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
					}
					(try_chan_entry!(self, chan.get_mut().funding_created(msg, &self.logger), channel_state, chan), chan.remove())
				},
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
			}
		};
		// Because we have exclusive ownership of the channel here we can release the channel_state
		// lock before watch_channel
		if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
			match e {
				ChannelMonitorUpdateErr::PermanentFailure => {
					// Note that we reply with the new channel_id in error messages if we gave up on the
					// channel, not the temporary_channel_id. This is compatible with ourselves, but the
					// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
					// any messages referencing a previously-closed channel anyway.
					// We do not do a force-close here as that would generate a monitor update for
					// a monitor that we didn't manage to store (and that we don't care about - we
					// don't respond with the funding_signed so the channel can never go on chain).
					let (_funding_txo_option, _monitor_update, failed_htlcs) = chan.force_shutdown(true);
					assert!(failed_htlcs.is_empty());
					return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
				},
				ChannelMonitorUpdateErr::TemporaryFailure => {
					// There's no problem signing a counterparty's funding transaction if our monitor
					// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
					// accepted payment from yet. We do, however, need to wait to send our funding_locked
					// until we have persisted our monitor.
					chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
				},
			}
		}
		let mut channel_state_lock = self.channel_state.lock().unwrap();
		let channel_state = &mut *channel_state_lock;
		match channel_state.by_id.entry(funding_msg.channel_id) {
			hash_map::Entry::Occupied(_) => {
				return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
			},
			hash_map::Entry::Vacant(e) => {
				channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
					node_id: counterparty_node_id.clone(),
					msg: funding_msg,
				});
				e.insert(chan);
			}
		}
		Ok(())
	}

	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
		let (funding_txo, user_id) = {
			let mut channel_lock = self.channel_state.lock().unwrap();
			let channel_state = &mut *channel_lock;
			match channel_state.by_id.entry(msg.channel_id) {
				hash_map::Entry::Occupied(mut chan) => {
					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
					}
					let monitor = match chan.get_mut().funding_signed(&msg, &self.logger) {
						Ok(update) => update,
						Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
					};
					if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
						return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
					}
					(chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
				},
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
			}
		};
		let mut pending_events = self.pending_events.lock().unwrap();
		pending_events.push(events::Event::FundingBroadcastSafe {
			funding_txo,
			user_channel_id: user_id,
		});
		Ok(())
	}

	fn internal_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
		let mut channel_state_lock = self.channel_state.lock().unwrap();
		let channel_state = &mut *channel_state_lock;
		match channel_state.by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan) => {
				if chan.get().get_counterparty_node_id() != *counterparty_node_id {
					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
				}
				try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan);
				if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) {
					log_trace!(self.logger, "Sending announcement_signatures for {} in response to funding_locked", log_bytes!(chan.get().channel_id()));
					// If we see the locking block before receiving the remote funding_locked, we
					// broadcast our announcement_sigs upon receiving the remote funding_locked. If
					// we receive the remote funding_locked before seeing the locking block, we
					// broadcast our announcement_sigs at block connection. We thus guarantee that
					// announcement_sigs are broadcast to our peer whatever the order of the events,
					// though our peer may not receive them due to disconnection. As the spec lacks
					// an acknowledgement for announcement_sigs, we may have to re-send them at peer
					// connection in the future if simultaneous misses by both peers due to
					// network/hardware failures turn out to be an issue. Note that, to achieve its
					// goal, only one of the announcement_sigs needs to be received; from then on
					// the sigs will be flooded to the whole network.
					channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
						node_id: counterparty_node_id.clone(),
						msg: announcement_sigs,
					});
				}
				Ok(())
			},
			hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
		}
	}

	fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
		let (mut dropped_htlcs, chan_option) = {
			let mut channel_state_lock = self.channel_state.lock().unwrap();
			let channel_state = &mut *channel_state_lock;
			match channel_state.by_id.entry(msg.channel_id.clone()) {
				hash_map::Entry::Occupied(mut chan_entry) => {
					if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
					}
					let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &msg), channel_state, chan_entry);
					if let Some(msg) = shutdown {
						channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
							node_id: counterparty_node_id.clone(),
							msg,
						});
					}
					if let Some(msg) = closing_signed {
						channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
							node_id: counterparty_node_id.clone(),
							msg,
						});
					}
					if chan_entry.get().is_shutdown() {
						if let Some(short_id) = chan_entry.get().get_short_channel_id() {
							channel_state.short_to_id.remove(&short_id);
						}
						(dropped_htlcs, Some(chan_entry.remove_entry().1))
					} else { (dropped_htlcs, None) }
				},
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
			}
		};
		for htlc_source in dropped_htlcs.drain(..) {
			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
		}
		if let Some(chan) = chan_option {
			if let Ok(update) = self.get_channel_update(&chan) {
				let mut channel_state = self.channel_state.lock().unwrap();
				channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
					msg: update
				});
			}
		}
		Ok(())
	}

	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
		let (tx, chan_option) = {
			let mut channel_state_lock = self.channel_state.lock().unwrap();
			let channel_state = &mut *channel_state_lock;
			match channel_state.by_id.entry(msg.channel_id.clone()) {
				hash_map::Entry::Occupied(mut chan_entry) => {
					if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
					}
					let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry);
					if let Some(msg) = closing_signed {
						channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
							node_id: counterparty_node_id.clone(),
							msg,
						});
					}
					if tx.is_some() {
						// We're done with this channel, we've got a signed closing transaction and
						// will send the closing_signed back to the remote peer upon return. This
						// also implies there are no pending HTLCs left on the channel, so we can
						// fully delete it from tracking (the channel monitor is still around to
						// watch for old state broadcasts)!
						if let Some(short_id) = chan_entry.get().get_short_channel_id() {
							channel_state.short_to_id.remove(&short_id);
						}
						(tx, Some(chan_entry.remove_entry().1))
					} else { (tx, None) }
				},
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
			}
		};
		if let Some(broadcast_tx) = tx {
			log_trace!(self.logger, "Broadcast onchain {}", log_tx!(broadcast_tx));
			self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
		}
		if let Some(chan) = chan_option {
			if let Ok(update) = self.get_channel_update(&chan) {
				let mut channel_state = self.channel_state.lock().unwrap();
				channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
					msg: update
				});
			}
		}
		Ok(())
	}

	fn internal_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
		//TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
		//determine the state of the payment based on our response/if we forward anything/the time
		//we take to respond. We should take care to avoid allowing such an attack.
		//
		//TODO: There exists a further attack where a node may garble the onion data, forward it to
		//us repeatedly garbled in different ways, and compare our error messages, which are
		//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
		//but we should prevent it anyway.

		let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
		let channel_state = &mut *channel_state_lock;

		match channel_state.by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan) => {
				if chan.get().get_counterparty_node_id() != *counterparty_node_id {
					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
				}

				let create_pending_htlc_status = |chan: &Channel<ChanSigner>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
					// Ensure error_code has the UPDATE flag set, since by default we send a
					// channel update along as part of failing the HTLC.
					assert!((error_code & 0x1000) != 0);
					// If the update_add is completely bogus, the call will Err and we will close,
					// but if we've sent a shutdown and they haven't acknowledged it yet, we just
					// want to reject the new HTLC and fail it backwards instead of forwarding.
					match pending_forward_info {
						PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
							let reason = if let Ok(upd) = self.get_channel_update(chan) {
								onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
									let mut res = Vec::with_capacity(8 + 128);
									// TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
									res.extend_from_slice(&byte_utils::be16_to_array(0));
									res.extend_from_slice(&upd.encode_with_len()[..]);
									res
								}[..])
							} else {
								// The only case where we'd be unable to
								// successfully get a channel update is if the
								// channel isn't in the fully-funded state yet,
								// implying our counterparty is trying to route
								// payments over the channel back to themselves
								// (cause no one else should know the short_id
								// is a lightning channel yet). We should have
								// no problem just calling this
								// unknown_next_peer (0x4000|10).
								onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[])
							};
							let msg = msgs::UpdateFailHTLC {
								channel_id: msg.channel_id,
								htlc_id: msg.htlc_id,
								reason
							};
							PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
						},
						_ => pending_forward_info
					}
				};
				try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan);
			},
			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
		}
		Ok(())
	}

	fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
		let mut channel_lock = self.channel_state.lock().unwrap();
		let htlc_source = {
			let channel_state = &mut *channel_lock;
			match channel_state.by_id.entry(msg.channel_id) {
				hash_map::Entry::Occupied(mut chan) => {
					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
					}
					try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan)
				},
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
			}
		};
		self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
		Ok(())
	}

	fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
		let mut channel_lock = self.channel_state.lock().unwrap();
		let channel_state = &mut *channel_lock;
		match channel_state.by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan) => {
				if chan.get().get_counterparty_node_id() != *counterparty_node_id {
					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
				}
				try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan);
			},
			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
		}
		Ok(())
	}

	fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
		let mut channel_lock = self.channel_state.lock().unwrap();
		let channel_state = &mut *channel_lock;
		match channel_state.by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan) => {
				if chan.get().get_counterparty_node_id() != *counterparty_node_id {
					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
				}
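				// BOLT 2 requires the BADONION bit (0x8000) to be set in
				// update_fail_malformed_htlc's failure_code; anything else is a protocol
				// violation by our peer.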
				if (msg.failure_code & 0x8000) == 0 {
					let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
					try_chan_entry!(self, Err(chan_err), channel_state, chan);
				}
				try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
				Ok(())
			},
			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
		}
	}

	fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
		let mut channel_state_lock = self.channel_state.lock().unwrap();
		let channel_state = &mut *channel_state_lock;
		match channel_state.by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan) => {
				if chan.get().get_counterparty_node_id() != *counterparty_node_id {
					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
				}
				let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) =
					match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) {
						Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
						Err((Some(update), e)) => {
							assert!(chan.get().is_awaiting_monitor_update());
							let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update);
							try_chan_entry!(self, Err(e), channel_state, chan);
							unreachable!();
						},
						Ok(res) => res
					};
				if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
					return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
					//TODO: Rebroadcast closing_signed if present on monitor update restoration
				}
				channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
					node_id: counterparty_node_id.clone(),
					msg: revoke_and_ack,
				});
				if let Some(msg) = commitment_signed {
					channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
						node_id: counterparty_node_id.clone(),
						updates: msgs::CommitmentUpdate {
							update_add_htlcs: Vec::new(),
							update_fulfill_htlcs: Vec::new(),
							update_fail_htlcs: Vec::new(),
							update_fail_malformed_htlcs: Vec::new(),
							update_fee: None,
							commitment_signed: msg,
						},
					});
				}
				if let Some(msg) = closing_signed {
					channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
						node_id: counterparty_node_id.clone(),
						msg,
					});
				}
				Ok(())
			},
			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
		}
	}

	#[inline]
	fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)]) {
		for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
			let mut forward_event = None;
			if !pending_forwards.is_empty() {
				let mut channel_state = self.channel_state.lock().unwrap();
				if channel_state.forward_htlcs.is_empty() {
					forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
				}
				for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
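					// HTLCs which are ultimately destined for us are batched under the
					// sentinel short_channel_id 0 (no confirmed channel uses scid 0), so
					// receives and forwards share a single queueing path here.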
					match channel_state.forward_htlcs.entry(match forward_info.routing {
						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
						PendingHTLCRouting::Receive { .. } => 0,
					}) {
						hash_map::Entry::Occupied(mut entry) => {
							entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
								prev_htlc_id, forward_info });
						},
						hash_map::Entry::Vacant(entry) => {
							entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
								prev_htlc_id, forward_info }));
						}
					}
				}
			}
			match forward_event {
				Some(time) => {
					let mut pending_events = self.pending_events.lock().unwrap();
					pending_events.push(events::Event::PendingHTLCsForwardable {
						time_forwardable: time
					});
				}
				None => {},
			}
		}
	}

	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
		let mut htlcs_to_fail = Vec::new();
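		// The `loop` below never actually iterates: it serves as a labeled block so each
		// early exit can `break` with a Result, letting us fail htlcs_to_fail and process
		// the pending forwards/failures below, after the channel_state lock is released.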
		let res = loop {
			let mut channel_state_lock = self.channel_state.lock().unwrap();
			let channel_state = &mut *channel_state_lock;
			match channel_state.by_id.entry(msg.channel_id) {
				hash_map::Entry::Occupied(mut chan) => {
					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
						break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
					}
					let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
					let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) =
						break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
					htlcs_to_fail = htlcs_to_fail_in;
					if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
						if was_frozen_for_monitor {
							assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
							break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
						} else {
							if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) {
								break Err(e);
							} else { unreachable!(); }
						}
					}
					if let Some(updates) = commitment_update {
						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
							node_id: counterparty_node_id.clone(),
							updates,
						});
					}
					if let Some(msg) = closing_signed {
						channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
							node_id: counterparty_node_id.clone(),
							msg,
						});
					}
					break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap()))
				},
				hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
			}
		};
		self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id);
		match res {
			Ok((pending_forwards, mut pending_failures, short_channel_id, channel_outpoint)) => {
				for failure in pending_failures.drain(..) {
					self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
				}
				self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
				Ok(())
			},
			Err(e) => Err(e)
		}
	}

fn internal_update_fee ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::UpdateFee ) -> Result < ( ) , MsgHandleErrInternal > {
2018-11-18 22:01:32 -05:00
let mut channel_lock = self . channel_state . lock ( ) . unwrap ( ) ;
2020-01-16 10:48:16 -08:00
let channel_state = & mut * channel_lock ;
2018-11-18 22:01:32 -05:00
match channel_state . by_id . entry ( msg . channel_id ) {
hash_map ::Entry ::Occupied ( mut chan ) = > {
2020-06-08 20:47:55 -04:00
if chan . get ( ) . get_counterparty_node_id ( ) ! = * counterparty_node_id {
2020-07-13 13:16:32 +09:00
return Err ( MsgHandleErrInternal ::send_err_msg_no_close ( " Got a message for a channel from the wrong node! " . to_owned ( ) , msg . channel_id ) ) ;
2018-09-07 21:59:45 +00:00
}
2020-02-27 11:33:03 -05:00
try_chan_entry! ( self , chan . get_mut ( ) . update_fee ( & self . fee_estimator , & msg ) , channel_state , chan ) ;
2018-09-07 21:59:45 +00:00
} ,
2020-07-13 13:16:32 +09:00
hash_map ::Entry ::Vacant ( _ ) = > return Err ( MsgHandleErrInternal ::send_err_msg_no_close ( " Failed to find corresponding channel " . to_owned ( ) , msg . channel_id ) )
2018-09-07 21:59:45 +00:00
}
2018-11-18 22:01:32 -05:00
Ok ( ( ) )
2018-09-07 21:59:45 +00:00
}
2020-06-08 20:47:55 -04:00
fn internal_announcement_signatures ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::AnnouncementSignatures ) -> Result < ( ) , MsgHandleErrInternal > {
2018-10-19 16:25:32 -04:00
let mut channel_state_lock = self . channel_state . lock ( ) . unwrap ( ) ;
2020-01-16 10:48:16 -08:00
let channel_state = & mut * channel_state_lock ;
2018-09-04 20:07:29 -04:00
2018-11-18 22:01:32 -05:00
match channel_state . by_id . entry ( msg . channel_id ) {
hash_map ::Entry ::Occupied ( mut chan ) = > {
2020-06-08 20:47:55 -04:00
if chan . get ( ) . get_counterparty_node_id ( ) ! = * counterparty_node_id {
2020-07-13 13:16:32 +09:00
return Err ( MsgHandleErrInternal ::send_err_msg_no_close ( " Got a message for a channel from the wrong node! " . to_owned ( ) , msg . channel_id ) ) ;
2018-10-19 16:25:32 -04:00
}
2018-11-18 22:01:32 -05:00
if ! chan . get ( ) . is_usable ( ) {
2020-07-13 13:16:32 +09:00
return Err ( MsgHandleErrInternal ::from_no_close ( LightningError { err : " Got an announcement_signatures before we were ready for it " . to_owned ( ) , action : msgs ::ErrorAction ::IgnoreError } ) ) ;
2018-10-19 16:25:32 -04:00
}
2018-09-04 20:07:29 -04:00
2018-10-19 16:25:32 -04:00
let our_node_id = self . get_our_node_id ( ) ;
2018-11-18 22:01:32 -05:00
let ( announcement , our_bitcoin_sig ) =
try_chan_entry! ( self , chan . get_mut ( ) . get_channel_announcement ( our_node_id . clone ( ) , self . genesis_hash . clone ( ) ) , channel_state , chan ) ;
2018-09-04 20:07:29 -04:00
2018-10-19 16:25:32 -04:00
let were_node_one = announcement . node_id_1 = = our_node_id ;
2019-03-04 18:02:02 +01:00
let msghash = hash_to_message! ( & Sha256dHash ::hash ( & announcement . encode ( ) [ .. ] ) [ .. ] ) ;
2020-07-13 13:24:40 +09:00
{
let their_node_key = if were_node_one { & announcement . node_id_2 } else { & announcement . node_id_1 } ;
let their_bitcoin_key = if were_node_one { & announcement . bitcoin_key_2 } else { & announcement . bitcoin_key_1 } ;
match ( self . secp_ctx . verify ( & msghash , & msg . node_signature , their_node_key ) ,
self . secp_ctx . verify ( & msghash , & msg . bitcoin_signature , their_bitcoin_key ) ) {
( Err ( e ) , _ ) = > {
let chan_err : ChannelError = ChannelError ::Close ( format! ( " Bad announcement_signatures. Failed to verify node_signature: {:?} . Maybe using different node_secret for transport and routing msg? UnsignedChannelAnnouncement used for verification is {:?} . their_node_key is {:?} " , e , & announcement , their_node_key ) ) ;
try_chan_entry! ( self , Err ( chan_err ) , channel_state , chan ) ;
} ,
( _ , Err ( e ) ) = > {
let chan_err : ChannelError = ChannelError ::Close ( format! ( " Bad announcement_signatures. Failed to verify bitcoin_signature: {:?} . UnsignedChannelAnnouncement used for verification is {:?} . their_bitcoin_key is ( {:?} ) " , e , & announcement , their_bitcoin_key ) ) ;
try_chan_entry! ( self , Err ( chan_err ) , channel_state , chan ) ;
} ,
_ = > { }
}
2018-11-22 22:45:51 -05:00
}
2018-09-04 20:07:29 -04:00
2018-10-19 16:25:32 -04:00
let our_node_sig = self . secp_ctx . sign ( & msghash , & self . our_network_key ) ;
channel_state . pending_msg_events . push ( events ::MessageSendEvent ::BroadcastChannelAnnouncement {
msg : msgs ::ChannelAnnouncement {
2018-09-04 20:07:29 -04:00
node_signature_1 : if were_node_one { our_node_sig } else { msg . node_signature } ,
node_signature_2 : if were_node_one { msg . node_signature } else { our_node_sig } ,
bitcoin_signature_1 : if were_node_one { our_bitcoin_sig } else { msg . bitcoin_signature } ,
bitcoin_signature_2 : if were_node_one { msg . bitcoin_signature } else { our_bitcoin_sig } ,
contents : announcement ,
2018-10-19 16:25:32 -04:00
} ,
2018-11-18 22:01:32 -05:00
update_msg : self . get_channel_update ( chan . get ( ) ) . unwrap ( ) , // can only fail if we're not in a ready state
2018-10-19 16:25:32 -04:00
} ) ;
} ,
2020-07-13 13:16:32 +09:00
hash_map ::Entry ::Vacant ( _ ) = > return Err ( MsgHandleErrInternal ::send_err_msg_no_close ( " Failed to find corresponding channel " . to_owned ( ) , msg . channel_id ) )
2018-10-19 16:25:32 -04:00
}
2018-09-04 20:07:29 -04:00
Ok ( ( ) )
}
2020-06-08 20:47:55 -04:00
fn internal_channel_reestablish ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::ChannelReestablish ) -> Result < ( ) , MsgHandleErrInternal > {
2018-10-20 17:18:53 -04:00
let mut channel_state_lock = self . channel_state . lock ( ) . unwrap ( ) ;
2020-01-16 10:48:16 -08:00
let channel_state = & mut * channel_state_lock ;
2018-10-20 17:18:53 -04:00
2018-11-18 22:01:32 -05:00
match channel_state . by_id . entry ( msg . channel_id ) {
hash_map ::Entry ::Occupied ( mut chan ) = > {
2020-06-08 20:47:55 -04:00
if chan . get ( ) . get_counterparty_node_id ( ) ! = * counterparty_node_id {
2020-07-13 13:16:32 +09:00
return Err ( MsgHandleErrInternal ::send_err_msg_no_close ( " Got a message for a channel from the wrong node! " . to_owned ( ) , msg . channel_id ) ) ;
2018-10-20 17:18:53 -04:00
}
2020-05-06 18:15:43 -04:00
// Currently, we expect all holding cell update_adds to be dropped on peer
// disconnect, so Channel's reestablish will never hand us any holding cell
// freed HTLCs to fail backwards. If in the future we no longer drop pending
// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
2020-02-07 20:08:31 -05:00
let ( funding_locked , revoke_and_ack , commitment_update , monitor_update_opt , mut order , shutdown ) =
2020-03-02 12:55:53 -05:00
try_chan_entry! ( self , chan . get_mut ( ) . channel_reestablish ( msg , & self . logger ) , channel_state , chan ) ;
2020-02-07 20:08:31 -05:00
if let Some ( monitor_update ) = monitor_update_opt {
2020-07-20 17:03:52 -07:00
if let Err ( e ) = self . chain_monitor . update_channel ( chan . get ( ) . get_funding_txo ( ) . unwrap ( ) , monitor_update ) {
2018-11-26 21:54:14 -05:00
// channel_reestablish doesn't guarantee that the order in which it returns
// messages is sensible, but if we're setting which messages to re-transmit
// on monitor update success, we need to make sure the order is sane.
if revoke_and_ack . is_none ( ) {
order = RAACommitmentOrder ::CommitmentFirst ;
}
if commitment_update . is_none ( ) {
order = RAACommitmentOrder ::RevokeAndACKFirst ;
}
2019-01-07 23:10:51 -05:00
return_monitor_err! ( self , e , channel_state , chan , order , revoke_and_ack . is_some ( ) , commitment_update . is_some ( ) ) ;
2018-11-26 21:54:14 -05:00
//TODO: Resend the funding_locked if needed once we get the monitor running again
2018-09-08 16:02:46 -04:00
}
2018-10-20 17:18:53 -04:00
}
if let Some ( msg ) = funding_locked {
channel_state . pending_msg_events . push ( events ::MessageSendEvent ::SendFundingLocked {
2020-06-08 20:47:55 -04:00
node_id : counterparty_node_id . clone ( ) ,
2018-10-20 17:18:53 -04:00
msg
} ) ;
}
macro_rules ! send_raa { ( ) = > {
if let Some ( msg ) = revoke_and_ack {
channel_state . pending_msg_events . push ( events ::MessageSendEvent ::SendRevokeAndACK {
2020-06-08 20:47:55 -04:00
node_id : counterparty_node_id . clone ( ) ,
2018-10-20 17:18:53 -04:00
msg
} ) ;
2018-10-17 08:47:33 -04:00
}
2018-10-20 17:18:53 -04:00
} }
macro_rules ! send_cu { ( ) = > {
if let Some ( updates ) = commitment_update {
channel_state . pending_msg_events . push ( events ::MessageSendEvent ::UpdateHTLCs {
2020-06-08 20:47:55 -04:00
node_id : counterparty_node_id . clone ( ) ,
2018-10-20 17:18:53 -04:00
updates
} ) ;
}
} }
match order {
RAACommitmentOrder ::RevokeAndACKFirst = > {
send_raa! ( ) ;
send_cu! ( ) ;
} ,
RAACommitmentOrder ::CommitmentFirst = > {
send_cu! ( ) ;
send_raa! ( ) ;
} ,
}
2018-11-01 17:17:28 -04:00
if let Some ( msg ) = shutdown {
channel_state . pending_msg_events . push ( events ::MessageSendEvent ::SendShutdown {
2020-06-08 20:47:55 -04:00
node_id : counterparty_node_id . clone ( ) ,
2018-11-01 17:17:28 -04:00
msg ,
} ) ;
}
2018-10-20 17:18:53 -04:00
Ok ( ( ) )
} ,
2020-07-13 13:16:32 +09:00
hash_map ::Entry ::Vacant ( _ ) = > return Err ( MsgHandleErrInternal ::send_err_msg_no_close ( " Failed to find corresponding channel " . to_owned ( ) , msg . channel_id ) )
2018-10-20 17:18:53 -04:00
}
2018-09-08 16:02:46 -04:00
}
2018-09-26 19:55:47 -04:00
/// Begins the update_fee process. Allowed only on an outbound channel.
/// If successful, this will generate an UpdateHTLCs event, so you should probably poll
/// PeerManager::process_events afterwards.
2018-09-28 19:06:41 -04:00
/// Note: This API is likely to change!
2020-05-12 13:48:07 -04:00
/// (C-not exported) because it's doc(hidden) anyway
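///
/// A minimal usage sketch, assuming `channel_manager` and `peer_manager` are the caller's
/// already-configured `ChannelManager` and `PeerManager`, and `channel_id` names an
/// outbound, live channel (all three are placeholder names):
///
/// ```ignore
/// // Propose a new feerate of 1_000 sat per 1000 weight-units; on success an
/// // UpdateHTLCs message event is queued for the counterparty.
/// channel_manager.update_fee(channel_id, 1_000)?;
/// // Deliver the queued commitment update to the peer.
/// peer_manager.process_events();
/// ```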
2018-09-28 19:06:41 -04:00
#[ doc(hidden) ]
2020-06-15 17:28:01 -04:00
pub fn update_fee ( & self , channel_id : [ u8 ; 32 ] , feerate_per_kw : u32 ) -> Result < ( ) , APIError > {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let counterparty_node_id ;
2018-11-22 18:58:23 -05:00
let err : Result < ( ) , _ > = loop {
2020-01-13 16:10:30 -05:00
let mut channel_state_lock = self . channel_state . lock ( ) . unwrap ( ) ;
2020-01-16 10:48:16 -08:00
let channel_state = & mut * channel_state_lock ;
2018-10-19 16:25:32 -04:00
2018-11-22 18:58:23 -05:00
match channel_state . by_id . entry ( channel_id ) {
2020-07-13 13:16:32 +09:00
hash_map ::Entry ::Vacant ( _ ) = > return Err ( APIError ::APIMisuseError { err : format ! ( " Failed to find corresponding channel for id {} " , channel_id . to_hex ( ) ) } ) ,
2018-11-22 18:58:23 -05:00
hash_map ::Entry ::Occupied ( mut chan ) = > {
if ! chan . get ( ) . is_outbound ( ) {
2020-07-13 13:16:32 +09:00
return Err ( APIError ::APIMisuseError { err : " update_fee cannot be sent for an inbound channel " . to_owned ( ) } ) ;
2018-11-22 18:58:23 -05:00
}
if chan . get ( ) . is_awaiting_monitor_update ( ) {
return Err ( APIError ::MonitorUpdateFailed ) ;
}
if ! chan . get ( ) . is_live ( ) {
2020-07-13 13:16:32 +09:00
return Err ( APIError ::ChannelUnavailable { err : " Channel is either not yet fully established or peer is currently disconnected " . to_owned ( ) } ) ;
2018-11-22 18:58:23 -05:00
}
2020-06-08 20:47:55 -04:00
counterparty_node_id = chan . get ( ) . get_counterparty_node_id ( ) ;
2020-02-07 20:08:31 -05:00
if let Some ( ( update_fee , commitment_signed , monitor_update ) ) =
2020-03-02 12:55:53 -05:00
break_chan_entry! ( self , chan . get_mut ( ) . send_update_fee_and_commit ( feerate_per_kw , & self . logger ) , channel_state , chan )
2018-11-22 18:58:23 -05:00
{
2020-07-20 17:03:52 -07:00
if let Err ( _e ) = self . chain_monitor . update_channel ( chan . get ( ) . get_funding_txo ( ) . unwrap ( ) , monitor_update ) {
2018-11-22 18:58:23 -05:00
unimplemented! ( ) ;
}
channel_state . pending_msg_events . push ( events ::MessageSendEvent ::UpdateHTLCs {
2020-06-08 20:47:55 -04:00
node_id : chan . get ( ) . get_counterparty_node_id ( ) ,
2018-11-22 18:58:23 -05:00
updates : msgs ::CommitmentUpdate {
update_add_htlcs : Vec ::new ( ) ,
update_fulfill_htlcs : Vec ::new ( ) ,
update_fail_htlcs : Vec ::new ( ) ,
update_fail_malformed_htlcs : Vec ::new ( ) ,
update_fee : Some ( update_fee ) ,
commitment_signed ,
2018-10-29 20:38:29 -04:00
} ,
2018-11-22 18:58:23 -05:00
} ) ;
2018-09-26 19:55:47 -04:00
}
2018-11-22 18:58:23 -05:00
} ,
}
return Ok ( ( ) )
} ;
2020-06-08 20:47:55 -04:00
match handle_error! ( self , err , counterparty_node_id ) {
2018-11-22 18:58:23 -05:00
Ok ( _ ) = > unreachable! ( ) ,
2019-11-05 18:51:05 -05:00
Err ( e ) = > { Err ( APIError ::APIMisuseError { err : e . err } ) }
2018-09-26 19:55:47 -04:00
}
}
2020-08-24 17:27:49 -04:00
2020-07-20 17:03:52 -07:00
/// Process pending events from the `chain::Watch`.
2020-08-24 17:27:49 -04:00
fn process_pending_monitor_events ( & self ) {
let mut failed_channels = Vec ::new ( ) ;
{
2020-07-20 17:03:52 -07:00
for monitor_event in self . chain_monitor . release_pending_monitor_events ( ) {
2020-08-24 17:27:49 -04:00
match monitor_event {
MonitorEvent ::HTLCEvent ( htlc_update ) = > {
if let Some ( preimage ) = htlc_update . payment_preimage {
log_trace! ( self . logger , " Claiming HTLC with preimage {} from our monitor " , log_bytes! ( preimage . 0 ) ) ;
self . claim_funds_internal ( self . channel_state . lock ( ) . unwrap ( ) , htlc_update . source , preimage ) ;
} else {
log_trace! ( self . logger , " Failing HTLC with hash {} from our monitor " , log_bytes! ( htlc_update . payment_hash . 0 ) ) ;
self . fail_htlc_backwards_internal ( self . channel_state . lock ( ) . unwrap ( ) , htlc_update . source , & htlc_update . payment_hash , HTLCFailReason ::Reason { failure_code : 0x4000 | 8 , data : Vec ::new ( ) } ) ;
}
} ,
MonitorEvent ::CommitmentTxBroadcasted ( funding_outpoint ) = > {
let mut channel_lock = self . channel_state . lock ( ) . unwrap ( ) ;
let channel_state = & mut * channel_lock ;
let by_id = & mut channel_state . by_id ;
let short_to_id = & mut channel_state . short_to_id ;
let pending_msg_events = & mut channel_state . pending_msg_events ;
if let Some ( mut chan ) = by_id . remove ( & funding_outpoint . to_channel_id ( ) ) {
if let Some ( short_id ) = chan . get_short_channel_id ( ) {
short_to_id . remove ( & short_id ) ;
}
failed_channels . push ( chan . force_shutdown ( false ) ) ;
if let Ok ( update ) = self . get_channel_update ( & chan ) {
pending_msg_events . push ( events ::MessageSendEvent ::BroadcastChannelUpdate {
msg : update
} ) ;
}
}
} ,
}
}
}
for failure in failed_channels . drain ( .. ) {
self . finish_force_close_channel ( failure ) ;
}
}
2017-12-25 01:05:27 -05:00
}
2020-05-30 23:20:17 -04:00
impl < ChanSigner : ChannelKeys , M : Deref , T : Deref , K : Deref , F : Deref , L : Deref > MessageSendEventsProvider for ChannelManager < ChanSigner , M , T , K , F , L >
2020-07-20 17:03:52 -07:00
where M ::Target : chain ::Watch < Keys = ChanSigner > ,
2020-02-20 14:14:12 -05:00
T ::Target : BroadcasterInterface ,
2020-02-26 16:00:26 -05:00
K ::Target : KeysInterface < ChanKeySigner = ChanSigner > ,
2020-02-27 11:33:03 -05:00
F ::Target : FeeEstimator ,
2020-03-02 12:55:53 -05:00
L ::Target : Logger ,
2020-02-20 14:14:12 -05:00
{
2020-05-30 23:20:17 -04:00
fn get_and_clear_pending_msg_events ( & self ) -> Vec < MessageSendEvent > {
2020-08-24 17:27:49 -04:00
//TODO: This behavior should be documented. It's non-intuitive that we query
// ChannelMonitors when clearing other events.
self . process_pending_monitor_events ( ) ;
2018-12-10 22:47:21 -05:00
2018-10-19 16:25:32 -04:00
let mut ret = Vec ::new ( ) ;
let mut channel_state = self . channel_state . lock ( ) . unwrap ( ) ;
mem ::swap ( & mut ret , & mut channel_state . pending_msg_events ) ;
ret
}
}
2020-05-30 23:20:17 -04:00
impl < ChanSigner : ChannelKeys , M : Deref , T : Deref , K : Deref , F : Deref , L : Deref > EventsProvider for ChannelManager < ChanSigner , M , T , K , F , L >
2020-07-20 17:03:52 -07:00
where M ::Target : chain ::Watch < Keys = ChanSigner > ,
2020-02-20 14:14:12 -05:00
T ::Target : BroadcasterInterface ,
2020-02-26 16:00:26 -05:00
K ::Target : KeysInterface < ChanKeySigner = ChanSigner > ,
2020-02-27 11:33:03 -05:00
F ::Target : FeeEstimator ,
2020-03-02 12:55:53 -05:00
L ::Target : Logger ,
2020-02-20 14:14:12 -05:00
{
2020-05-30 23:20:17 -04:00
fn get_and_clear_pending_events ( & self ) -> Vec < Event > {
2020-08-24 17:27:49 -04:00
//TODO: This behavior should be documented. It's non-intuitive that we query
// ChannelMonitors when clearing other events.
self . process_pending_monitor_events ( ) ;
2018-12-10 22:47:21 -05:00
2017-12-25 01:05:27 -05:00
let mut ret = Vec ::new ( ) ;
2018-10-19 16:25:32 -04:00
let mut pending_events = self . pending_events . lock ( ) . unwrap ( ) ;
2017-12-25 01:05:27 -05:00
mem ::swap ( & mut ret , & mut * pending_events ) ;
ret
}
}
2020-07-29 13:02:29 -07:00
impl < ChanSigner : ChannelKeys , M : Deref , T : Deref , K : Deref , F : Deref , L : Deref > ChannelManager < ChanSigner , M , T , K , F , L >
2020-07-20 17:03:52 -07:00
where M ::Target : chain ::Watch < Keys = ChanSigner > ,
2020-02-20 14:14:12 -05:00
T ::Target : BroadcasterInterface ,
2020-02-26 16:00:26 -05:00
K ::Target : KeysInterface < ChanKeySigner = ChanSigner > ,
2020-02-27 11:33:03 -05:00
F ::Target : FeeEstimator ,
2020-07-29 13:02:29 -07:00
L ::Target : Logger ,
2020-02-20 14:14:12 -05:00
{
2020-07-29 13:02:29 -07:00
/// Updates channel state based on transactions seen in a connected block.
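///
/// A minimal sketch of feeding in a newly connected block, assuming `block` is a
/// `bitcoin::Block` that just connected at `height`, and relying on `TransactionData`
/// being the `(index, &Transaction)` slice alias from `chain::transaction`:
///
/// ```ignore
/// let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
/// channel_manager.block_connected(&block.header, &txdata, height);
/// ```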
2020-09-09 12:16:09 -07:00
pub fn block_connected ( & self , header : & BlockHeader , txdata : & TransactionData , height : u32 ) {
2020-08-25 17:12:00 -04:00
let header_hash = header . block_hash ( ) ;
2020-06-16 15:10:17 -07:00
log_trace! ( self . logger , " Block {} at height {} connected " , header_hash , height ) ;
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2018-07-28 19:15:45 -04:00
let mut failed_channels = Vec ::new ( ) ;
2020-01-09 14:09:25 -05:00
let mut timed_out_htlcs = Vec ::new ( ) ;
2017-12-25 01:05:27 -05:00
{
2018-07-28 19:15:20 -04:00
let mut channel_lock = self . channel_state . lock ( ) . unwrap ( ) ;
2020-01-16 10:48:16 -08:00
let channel_state = & mut * channel_lock ;
let short_to_id = & mut channel_state . short_to_id ;
let pending_msg_events = & mut channel_state . pending_msg_events ;
2018-04-24 00:19:52 -04:00
channel_state . by_id . retain ( | _ , channel | {
2020-06-16 15:10:17 -07:00
let res = channel . block_connected ( header , txdata , height ) ;
2018-12-07 22:09:58 -05:00
if let Ok ( ( chan_res , mut timed_out_pending_htlcs ) ) = res {
for ( source , payment_hash ) in timed_out_pending_htlcs . drain ( .. ) {
let chan_update = self . get_channel_update ( & channel ) . map ( | u | u . encode_with_len ( ) ) . unwrap ( ) ; // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
timed_out_htlcs . push ( ( source , payment_hash , HTLCFailReason ::Reason {
failure_code : 0x1000 | 14 , // expiry_too_soon, or at least it is now
data : chan_update ,
} ) ) ;
}
if let Some ( funding_locked ) = chan_res {
pending_msg_events . push ( events ::MessageSendEvent ::SendFundingLocked {
2020-06-08 20:47:55 -04:00
node_id : channel . get_counterparty_node_id ( ) ,
2018-12-07 22:09:58 -05:00
msg : funding_locked ,
2018-10-19 17:30:52 -04:00
} ) ;
2018-12-07 22:09:58 -05:00
if let Some ( announcement_sigs ) = self . get_announcement_sigs ( channel ) {
2020-03-02 12:55:53 -05:00
log_trace! ( self . logger , " Sending funding_locked and announcement_signatures for {} " , log_bytes! ( channel . channel_id ( ) ) ) ;
2018-12-07 22:09:58 -05:00
pending_msg_events . push ( events ::MessageSendEvent ::SendAnnouncementSignatures {
2020-06-08 20:47:55 -04:00
node_id : channel . get_counterparty_node_id ( ) ,
2018-12-07 22:09:58 -05:00
msg : announcement_sigs ,
} ) ;
} else {
2020-03-02 12:55:53 -05:00
log_trace! ( self . logger , " Sending funding_locked WITHOUT announcement_signatures for {} " , log_bytes! ( channel . channel_id ( ) ) ) ;
2018-12-07 22:09:58 -05:00
}
short_to_id . insert ( channel . get_short_channel_id ( ) . unwrap ( ) , channel . channel_id ( ) ) ;
2018-10-19 17:30:52 -04:00
}
2018-12-07 22:09:58 -05:00
} else if let Err ( e ) = res {
2018-10-19 16:25:32 -04:00
pending_msg_events . push ( events ::MessageSendEvent ::HandleError {
2020-06-08 20:47:55 -04:00
node_id : channel . get_counterparty_node_id ( ) ,
2019-11-04 19:54:43 -05:00
action : msgs ::ErrorAction ::SendErrorMessage { msg : e } ,
2018-08-01 16:34:03 +00:00
} ) ;
2018-11-22 20:50:13 -05:00
return false ;
2018-04-24 00:19:52 -04:00
}
if let Some ( funding_txo ) = channel . get_funding_txo ( ) {
2020-06-16 15:10:17 -07:00
for & ( _ , tx ) in txdata . iter ( ) {
2018-04-24 00:19:52 -04:00
for inp in tx . input . iter ( ) {
2018-08-20 17:13:07 -04:00
if inp . previous_output = = funding_txo . into_bitcoin_outpoint ( ) {
2020-03-02 12:55:53 -05:00
log_trace! ( self . logger , " Detected channel-closing tx {} spending {}:{}, closing channel {} " , tx . txid ( ) , inp . previous_output . txid , inp . previous_output . vout , log_bytes! ( channel . channel_id ( ) ) ) ;
2018-04-24 00:19:52 -04:00
if let Some ( short_id ) = channel . get_short_channel_id ( ) {
2018-07-28 19:15:20 -04:00
short_to_id . remove ( & short_id ) ;
2018-04-24 00:19:52 -04:00
}
2018-07-28 19:15:45 -04:00
// It looks like our counterparty went on-chain. We go ahead and
// broadcast our latest local state as well here, just in case it's
// some kind of SPV attack, though we expect these to be dropped.
2020-03-18 16:30:05 -04:00
failed_channels . push ( channel . force_shutdown ( true ) ) ;
2018-04-24 20:40:22 -04:00
if let Ok ( update ) = self . get_channel_update ( & channel ) {
2018-10-19 16:25:32 -04:00
pending_msg_events . push ( events ::MessageSendEvent ::BroadcastChannelUpdate {
2018-04-24 20:40:22 -04:00
msg : update
} ) ;
}
2018-04-24 00:19:52 -04:00
return false ;
2017-12-25 01:05:27 -05:00
}
2018-04-24 00:19:52 -04:00
}
}
2017-12-25 01:05:27 -05:00
}
2018-04-24 00:19:52 -04:00
true
} ) ;
2020-01-09 14:09:25 -05:00
channel_state . claimable_htlcs . retain ( | & ( ref payment_hash , _ ) , htlcs | {
htlcs . retain ( | htlc | {
// If height is approaching the number of blocks we think it takes us to get
// our commitment transaction confirmed before the HTLC expires, plus the
// number of blocks we generally consider it to take to do a commitment update,
// just give up on it and fail the HTLC.
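// For example, an HTLC with cltv_expiry = 500_100 is failed back once the
// chain reaches height 500_100 - HTLC_FAIL_BACK_BUFFER, leaving us enough
// blocks to confirm a commitment transaction (or update the commitment)
// rather than racing the on-chain timeout.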
if height > = htlc . cltv_expiry - HTLC_FAIL_BACK_BUFFER {
let mut htlc_msat_height_data = byte_utils ::be64_to_array ( htlc . value ) . to_vec ( ) ;
htlc_msat_height_data . extend_from_slice ( & byte_utils ::be32_to_array ( height ) ) ;
timed_out_htlcs . push ( ( HTLCSource ::PreviousHopData ( htlc . prev_hop . clone ( ) ) , payment_hash . clone ( ) , HTLCFailReason ::Reason {
failure_code : 0x4000 | 15 ,
data : htlc_msat_height_data
} ) ) ;
false
} else { true }
} ) ;
! htlcs . is_empty ( ) // Only retain this entry if htlcs has at least one entry.
} ) ;
2017-12-25 01:05:27 -05:00
}
2018-07-28 19:15:45 -04:00
for failure in failed_channels . drain ( .. ) {
self . finish_force_close_channel ( failure ) ;
}
2020-01-09 14:09:25 -05:00
for ( source , payment_hash , reason ) in timed_out_htlcs . drain ( .. ) {
self . fail_htlc_backwards_internal ( self . channel_state . lock ( ) . unwrap ( ) , source , & payment_hash , reason ) ;
}
2018-07-23 19:45:59 -04:00
self . latest_block_height . store ( height as usize , Ordering ::Release ) ;
2018-12-09 12:17:27 -05:00
* self . last_block_hash . try_lock ( ) . expect ( " block_(dis)connected must not be called in parallel " ) = header_hash ;
2020-03-05 18:01:06 -05:00
loop {
// Update last_node_announcement_serial to be the max of its current value and the
// block timestamp. This should keep us close to the current time without relying on
// having an explicit local time source.
// Just in case we end up in a race, we loop until we either successfully update
// last_node_announcement_serial or decide we don't need to.
let old_serial = self . last_node_announcement_serial . load ( Ordering ::Acquire ) ;
if old_serial > = header . time as usize { break ; }
if self . last_node_announcement_serial . compare_exchange ( old_serial , header . time as usize , Ordering ::AcqRel , Ordering ::Relaxed ) . is_ok ( ) {
break ;
}
}
2017-12-25 01:05:27 -05:00
}
2020-07-29 13:02:29 -07:00
/// Updates channel state based on a disconnected block.
///
/// If necessary, the channel may be force-closed without letting the counterparty participate
/// in the shutdown.
pub fn block_disconnected ( & self , header : & BlockHeader ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2018-07-28 19:15:45 -04:00
let mut failed_channels = Vec ::new ( ) ;
{
let mut channel_lock = self . channel_state . lock ( ) . unwrap ( ) ;
2020-01-16 10:48:16 -08:00
let channel_state = & mut * channel_lock ;
let short_to_id = & mut channel_state . short_to_id ;
let pending_msg_events = & mut channel_state . pending_msg_events ;
2018-07-28 19:15:45 -04:00
channel_state . by_id . retain ( | _ , v | {
if v . block_disconnected ( header ) {
if let Some ( short_id ) = v . get_short_channel_id ( ) {
short_to_id . remove ( & short_id ) ;
}
2020-03-18 16:30:05 -04:00
failed_channels . push ( v . force_shutdown ( true ) ) ;
2018-07-28 19:15:45 -04:00
if let Ok ( update ) = self . get_channel_update ( & v ) {
2018-10-19 16:25:32 -04:00
pending_msg_events . push ( events ::MessageSendEvent ::BroadcastChannelUpdate {
2018-07-28 19:15:45 -04:00
msg : update
} ) ;
}
false
} else {
true
2018-07-14 02:08:14 +00:00
}
2018-07-28 19:15:45 -04:00
} ) ;
}
for failure in failed_channels . drain ( .. ) {
self . finish_force_close_channel ( failure ) ;
}
2018-07-23 19:45:59 -04:00
self . latest_block_height . fetch_sub ( 1 , Ordering ::AcqRel ) ;
2020-08-25 17:12:00 -04:00
* self . last_block_hash . try_lock ( ) . expect ( " block_(dis)connected must not be called in parallel " ) = header . block_hash ( ) ;
2017-12-25 01:05:27 -05:00
}
2020-11-19 12:53:16 -05:00
/// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
/// indicating whether persistence is necessary. Only one listener on `wait_timeout` is
/// guaranteed to be woken up.
/// Note that the feature `allow_wallclock_use` must be enabled to use this function.
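///
/// A background-persister sketch, assuming `channel_manager` is shared behind an `Arc`,
/// `persist` is the caller's own routine for writing it to disk, and
/// `std::time::Duration` is in scope (all assumptions of this example):
///
/// ```ignore
/// loop {
///     if channel_manager.wait_timeout(Duration::from_secs(5)) {
///         persist(&*channel_manager)?;
///     }
/// }
/// ```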
#[ cfg(any(test, feature = " allow_wallclock_use " )) ]
pub fn wait_timeout ( & self , max_wait : Duration ) -> bool {
self . persistence_notifier . wait_timeout ( max_wait )
}
/// Blocks until ChannelManager needs to be persisted. Only one listener on `wait` is
/// guaranteed to be woken up.
pub fn wait ( & self ) {
self . persistence_notifier . wait ( )
}
#[ cfg(any(test, feature = " _test_utils " )) ]
pub fn get_persistence_condvar_value ( & self ) -> bool {
let mutcond = & self . persistence_notifier . persistence_lock ;
let & ( ref mtx , _ ) = mutcond ;
let guard = mtx . lock ( ) . unwrap ( ) ;
* guard
}
2017-12-25 01:05:27 -05:00
}
2020-03-02 12:55:53 -05:00
impl < ChanSigner : ChannelKeys , M : Deref + Sync + Send , T : Deref + Sync + Send , K : Deref + Sync + Send , F : Deref + Sync + Send , L : Deref + Sync + Send >
ChannelMessageHandler for ChannelManager < ChanSigner , M , T , K , F , L >
2020-07-20 17:03:52 -07:00
where M ::Target : chain ::Watch < Keys = ChanSigner > ,
2020-02-20 14:14:12 -05:00
T ::Target : BroadcasterInterface ,
2020-02-26 16:00:26 -05:00
K ::Target : KeysInterface < ChanKeySigner = ChanSigner > ,
2020-02-27 11:33:03 -05:00
F ::Target : FeeEstimator ,
2020-03-02 12:55:53 -05:00
L ::Target : Logger ,
2020-02-20 14:14:12 -05:00
{
2020-06-08 20:47:55 -04:00
fn handle_open_channel ( & self , counterparty_node_id : & PublicKey , their_features : InitFeatures , msg : & msgs ::OpenChannel ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_open_channel ( counterparty_node_id , their_features , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_accept_channel ( & self , counterparty_node_id : & PublicKey , their_features : InitFeatures , msg : & msgs ::AcceptChannel ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_accept_channel ( counterparty_node_id , their_features , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_funding_created ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::FundingCreated ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_funding_created ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_funding_signed ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::FundingSigned ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_funding_signed ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_funding_locked ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::FundingLocked ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_funding_locked ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_shutdown ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::Shutdown ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_shutdown ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_closing_signed ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::ClosingSigned ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_closing_signed ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_update_add_htlc ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::UpdateAddHTLC ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_update_add_htlc ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_update_fulfill_htlc ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::UpdateFulfillHTLC ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_update_fulfill_htlc ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_update_fail_htlc ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::UpdateFailHTLC ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_update_fail_htlc ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_update_fail_malformed_htlc ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::UpdateFailMalformedHTLC ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_update_fail_malformed_htlc ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_commitment_signed ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::CommitmentSigned ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_commitment_signed ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2018-04-04 11:56:54 -04:00
}
2020-06-08 20:47:55 -04:00
fn handle_revoke_and_ack ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::RevokeAndACK ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_revoke_and_ack ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_update_fee ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::UpdateFee ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_update_fee ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2020-06-08 20:47:55 -04:00
fn handle_announcement_signatures ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::AnnouncementSignatures ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_announcement_signatures ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2017-12-25 01:05:27 -05:00
}
2018-04-01 19:23:09 -04:00
2020-06-08 20:47:55 -04:00
fn handle_channel_reestablish ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::ChannelReestablish ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2020-06-08 20:47:55 -04:00
let _ = handle_error! ( self , self . internal_channel_reestablish ( counterparty_node_id , msg ) , * counterparty_node_id ) ;
2018-09-07 15:51:40 -04:00
}
2020-06-08 20:47:55 -04:00
fn peer_disconnected ( & self , counterparty_node_id : & PublicKey , no_connection_possible : bool ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2018-07-28 19:15:45 -04:00
let mut failed_channels = Vec ::new ( ) ;
2018-09-05 18:32:55 -04:00
let mut failed_payments = Vec ::new ( ) ;
2019-12-29 14:22:43 -05:00
let mut no_channels_remain = true ;
2018-04-24 20:40:22 -04:00
{
let mut channel_state_lock = self . channel_state . lock ( ) . unwrap ( ) ;
2020-01-16 10:48:16 -08:00
let channel_state = & mut * channel_state_lock ;
let short_to_id = & mut channel_state . short_to_id ;
let pending_msg_events = & mut channel_state . pending_msg_events ;
2018-04-24 20:40:22 -04:00
if no_connection_possible {
2020-06-08 20:47:55 -04:00
log_debug! ( self . logger , " Failing all channels with {} due to no_connection_possible " , log_pubkey! ( counterparty_node_id ) ) ;
2018-04-24 20:40:22 -04:00
channel_state . by_id . retain ( | _ , chan | {
2020-06-08 20:47:55 -04:00
if chan . get_counterparty_node_id ( ) = = * counterparty_node_id {
2018-04-24 20:40:22 -04:00
if let Some ( short_id ) = chan . get_short_channel_id ( ) {
short_to_id . remove ( & short_id ) ;
}
2020-03-18 16:30:05 -04:00
failed_channels . push ( chan . force_shutdown ( true ) ) ;
2018-04-24 20:40:22 -04:00
if let Ok ( update ) = self . get_channel_update ( & chan ) {
2018-10-19 16:25:32 -04:00
pending_msg_events . push ( events ::MessageSendEvent ::BroadcastChannelUpdate {
2018-04-24 20:40:22 -04:00
msg : update
} ) ;
}
false
} else {
true
2018-04-01 19:23:09 -04:00
}
2018-04-24 20:40:22 -04:00
} ) ;
} else {
2020-06-08 20:47:55 -04:00
log_debug! ( self . logger , " Marking channels with {} disconnected and generating channel_updates " , log_pubkey! ( counterparty_node_id ) ) ;
2018-09-07 15:11:52 -04:00
channel_state . by_id . retain ( | _ , chan | {
2020-06-08 20:47:55 -04:00
if chan . get_counterparty_node_id ( ) = = * counterparty_node_id {
2020-05-06 18:15:43 -04:00
// Note that currently on channel reestablish we assert that there are no
// holding cell add-HTLCs, so if in the future we stop removing uncommitted HTLCs
// on peer disconnect here, there will need to be corresponding changes in
// reestablish logic.
2020-03-02 12:55:53 -05:00
let failed_adds = chan . remove_uncommitted_htlcs_and_mark_paused ( & self . logger ) ;
2019-11-18 00:43:13 -05:00
chan . to_disabled_marked ( ) ;
2018-09-05 18:32:55 -04:00
if ! failed_adds . is_empty ( ) {
2018-09-07 15:11:52 -04:00
let chan_update = self . get_channel_update ( & chan ) . map ( | u | u . encode_with_len ( ) ) . unwrap ( ) ; // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
2018-09-05 18:32:55 -04:00
failed_payments . push ( ( chan_update , failed_adds ) ) ;
}
2018-09-07 15:11:52 -04:00
if chan . is_shutdown ( ) {
if let Some ( short_id ) = chan . get_short_channel_id ( ) {
short_to_id . remove ( & short_id ) ;
}
return false ;
2019-12-29 14:22:43 -05:00
} else {
no_channels_remain = false ;
2018-09-07 15:11:52 -04:00
}
2018-04-24 00:19:52 -04:00
}
2018-09-07 15:11:52 -04:00
true
} )
2018-04-01 19:23:09 -04:00
}
2019-01-07 23:11:37 -05:00
pending_msg_events . retain ( | msg | {
match msg {
2020-06-08 20:47:55 -04:00
& events ::MessageSendEvent ::SendAcceptChannel { ref node_id , .. } = > node_id ! = counterparty_node_id ,
& events ::MessageSendEvent ::SendOpenChannel { ref node_id , .. } = > node_id ! = counterparty_node_id ,
& events ::MessageSendEvent ::SendFundingCreated { ref node_id , .. } = > node_id ! = counterparty_node_id ,
& events ::MessageSendEvent ::SendFundingSigned { ref node_id , .. } = > node_id ! = counterparty_node_id ,
& events ::MessageSendEvent ::SendFundingLocked { ref node_id , .. } = > node_id ! = counterparty_node_id ,
& events ::MessageSendEvent ::SendAnnouncementSignatures { ref node_id , .. } = > node_id ! = counterparty_node_id ,
& events ::MessageSendEvent ::UpdateHTLCs { ref node_id , .. } = > node_id ! = counterparty_node_id ,
& events ::MessageSendEvent ::SendRevokeAndACK { ref node_id , .. } = > node_id ! = counterparty_node_id ,
& events ::MessageSendEvent ::SendClosingSigned { ref node_id , .. } = > node_id ! = counterparty_node_id ,
& events ::MessageSendEvent ::SendShutdown { ref node_id , .. } = > node_id ! = counterparty_node_id ,
& events ::MessageSendEvent ::SendChannelReestablish { ref node_id , .. } = > node_id ! = counterparty_node_id ,
2019-01-07 23:11:37 -05:00
& events ::MessageSendEvent ::BroadcastChannelAnnouncement { .. } = > true ,
2020-01-02 20:32:37 -05:00
& events ::MessageSendEvent ::BroadcastNodeAnnouncement { .. } = > true ,
2019-01-07 23:11:37 -05:00
& events ::MessageSendEvent ::BroadcastChannelUpdate { .. } = > true ,
2020-06-08 20:47:55 -04:00
& events ::MessageSendEvent ::HandleError { ref node_id , .. } = > node_id ! = counterparty_node_id ,
2019-01-07 23:11:37 -05:00
& events ::MessageSendEvent ::PaymentFailureNetworkUpdate { .. } = > true ,
2020-10-22 08:47:24 -04:00
& events ::MessageSendEvent ::SendChannelRangeQuery { .. } = > false ,
& events ::MessageSendEvent ::SendShortIdsQuery { .. } = > false ,
2019-01-07 23:11:37 -05:00
}
} ) ;
2018-04-01 19:23:09 -04:00
}
2019-12-29 14:22:43 -05:00
if no_channels_remain {
2020-06-08 20:47:55 -04:00
self . per_peer_state . write ( ) . unwrap ( ) . remove ( counterparty_node_id ) ;
2019-12-29 14:22:43 -05:00
}
2018-07-28 19:15:45 -04:00
for failure in failed_channels . drain ( .. ) {
self . finish_force_close_channel ( failure ) ;
}
2018-09-05 18:32:55 -04:00
for ( chan_update , mut htlc_sources ) in failed_payments {
for ( htlc_source , payment_hash ) in htlc_sources . drain ( .. ) {
self . fail_htlc_backwards_internal ( self . channel_state . lock ( ) . unwrap ( ) , htlc_source , & payment_hash , HTLCFailReason ::Reason { failure_code : 0x1000 | 7 , data : chan_update . clone ( ) } ) ;
}
}
2018-04-01 19:23:09 -04:00
}
2018-08-25 14:48:18 -04:00
2020-06-08 20:47:55 -04:00
fn peer_connected ( & self , counterparty_node_id : & PublicKey , init_msg : & msgs ::Init ) {
log_debug! ( self . logger , " Generating channel_reestablish events for {} " , log_pubkey! ( counterparty_node_id ) ) ;
2018-11-02 10:45:29 -04:00
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2019-12-29 14:22:43 -05:00
{
let mut peer_state_lock = self . per_peer_state . write ( ) . unwrap ( ) ;
2020-06-08 20:47:55 -04:00
match peer_state_lock . entry ( counterparty_node_id . clone ( ) ) {
2019-12-29 14:22:43 -05:00
hash_map ::Entry ::Vacant ( e ) = > {
e . insert ( Mutex ::new ( PeerState {
latest_features : init_msg . features . clone ( ) ,
} ) ) ;
} ,
hash_map ::Entry ::Occupied ( e ) = > {
e . get ( ) . lock ( ) . unwrap ( ) . latest_features = init_msg . features . clone ( ) ;
} ,
}
}
2018-10-20 17:50:34 -04:00
let mut channel_state_lock = self . channel_state . lock ( ) . unwrap ( ) ;
2020-01-16 10:48:16 -08:00
let channel_state = & mut * channel_state_lock ;
let pending_msg_events = & mut channel_state . pending_msg_events ;
2018-09-08 16:02:46 -04:00
channel_state . by_id . retain ( | _ , chan | {
2020-06-08 20:47:55 -04:00
if chan . get_counterparty_node_id ( ) = = * counterparty_node_id {
2018-09-08 16:02:46 -04:00
if ! chan . have_received_message ( ) {
// If we created this (outbound) channel while we were disconnected from the
// peer we probably failed to send the open_channel message, which is now
// lost. We can't have had anything pending related to this channel, so we just
// drop it.
false
} else {
2018-10-20 17:50:34 -04:00
pending_msg_events . push ( events ::MessageSendEvent ::SendChannelReestablish {
2020-06-08 20:47:55 -04:00
node_id : chan . get_counterparty_node_id ( ) ,
2020-03-02 12:55:53 -05:00
msg : chan . get_channel_reestablish ( & self . logger ) ,
2018-10-20 17:50:34 -04:00
} ) ;
2018-09-08 16:02:46 -04:00
true
}
} else { true }
} ) ;
//TODO: Also re-broadcast announcement_signatures
2018-09-07 15:51:40 -04:00
}
2020-06-08 20:47:55 -04:00
fn handle_error ( & self , counterparty_node_id : & PublicKey , msg : & msgs ::ErrorMessage ) {
2020-11-19 12:53:16 -05:00
let _persistence_guard = PersistenceNotifierGuard ::new ( & self . total_consistency_lock , & self . persistence_notifier ) ;
2018-10-20 18:46:03 -04:00
2018-08-25 14:48:18 -04:00
if msg . channel_id = = [ 0 ; 32 ] {
for chan in self . list_channels ( ) {
2020-06-08 20:47:55 -04:00
if chan . remote_network_id = = * counterparty_node_id {
2021-01-14 17:05:38 +01:00
// Untrusted message from the peer; we throw away the error if the id points to a non-existent channel
2021-01-15 21:34:17 -05:00
let _ = self . force_close_channel_with_peer ( & chan . channel_id , Some ( counterparty_node_id ) ) ;
2018-08-25 14:48:18 -04:00
}
}
} else {
2021-01-14 17:05:38 +01:00
// Untrusted message from the peer; we throw away the error if the id points to a non-existent channel
2021-01-15 21:34:17 -05:00
let _ = self . force_close_channel_with_peer ( & msg . channel_id , Some ( counterparty_node_id ) ) ;
2018-08-25 14:48:18 -04:00
}
}
2017-12-25 01:05:27 -05:00
}
2020-11-19 12:53:16 -05:00
/// Used to signal to the ChannelManager persister that the manager needs to be re-persisted to
/// disk/backups, through `wait_timeout` and `wait`.
struct PersistenceNotifier {
/// Users won't access the persistence_lock directly, but rather wait on its bool using
/// `wait_timeout` and `wait`.
persistence_lock : ( Mutex < bool > , Condvar ) ,
}
impl PersistenceNotifier {
fn new ( ) -> Self {
Self {
persistence_lock : ( Mutex ::new ( false ) , Condvar ::new ( ) ) ,
}
}
fn wait ( & self ) {
loop {
let & ( ref mtx , ref cvar ) = & self . persistence_lock ;
let mut guard = mtx . lock ( ) . unwrap ( ) ;
guard = cvar . wait ( guard ) . unwrap ( ) ;
let result = * guard ;
if result {
* guard = false ;
return
}
}
}
#[ cfg(any(test, feature = " allow_wallclock_use " )) ]
fn wait_timeout(&self, max_wait: Duration) -> bool {
	let current_time = Instant::now();
	let mut remaining_wait = max_wait;
	loop {
		let &(ref mtx, ref cvar) = &self.persistence_lock;
		let mut guard = mtx.lock().unwrap();
		guard = cvar.wait_timeout(guard, remaining_wait).unwrap().0;
		// Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
		// desired wait time has actually passed, and if not then restart the loop with a reduced wait
		// time. Note that this logic can be highly simplified through the use of
		// `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to
		// 1.42.0.
		let elapsed = current_time.elapsed();
		let result = *guard;
		if result || elapsed >= max_wait {
			*guard = false;
			return result;
		}
		match max_wait.checked_sub(elapsed) {
			None => return result,
			// Spurious wakeup: wait again, but only for the time still remaining.
			Some(dur) => { remaining_wait = dur; continue }
		}
	}
}
// Signal to the ChannelManager persister that there are updates necessitating persisting to disk.
fn notify ( & self ) {
let & ( ref persist_mtx , ref cnd ) = & self . persistence_lock ;
let mut persistence_lock = persist_mtx . lock ( ) . unwrap ( ) ;
* persistence_lock = true ;
mem ::drop ( persistence_lock ) ;
cnd . notify_all ( ) ;
}
}
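// Once our MSRV reaches 1.42.0, the `wait_timeout` loop above could be collapsed with
// `Condvar::wait_timeout_while` (stable in std since Rust 1.42), roughly as in this
// uncompiled sketch:
//
//     fn wait_timeout(&self, max_wait: Duration) -> bool {
//         let &(ref mtx, ref cvar) = &self.persistence_lock;
//         let guard = mtx.lock().unwrap();
//         // Sleep while there is nothing to persist; wait_timeout_while re-checks the
//         // predicate itself on spurious wakeups.
//         let (mut guard, _timeout) =
//             cvar.wait_timeout_while(guard, max_wait, |needs_persist| !*needs_persist).unwrap();
//         let result = *guard;
//         *guard = false;
//         result
//     }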
2018-10-26 14:35:50 -04:00
const SERIALIZATION_VERSION : u8 = 1 ;
const MIN_SERIALIZATION_VERSION : u8 = 1 ;
2020-01-01 15:56:03 -05:00
impl Writeable for PendingHTLCInfo {
2018-10-26 14:35:50 -04:00
fn write < W : Writer > ( & self , writer : & mut W ) -> Result < ( ) , ::std ::io ::Error > {
2020-01-01 17:39:51 -05:00
match & self . routing {
& PendingHTLCRouting ::Forward { ref onion_packet , ref short_channel_id } = > {
0 u8 . write ( writer ) ? ;
onion_packet . write ( writer ) ? ;
short_channel_id . write ( writer ) ? ;
} ,
2020-01-09 14:09:25 -05:00
& PendingHTLCRouting ::Receive { ref payment_data , ref incoming_cltv_expiry } = > {
2020-01-01 17:39:51 -05:00
1 u8 . write ( writer ) ? ;
2020-01-01 20:20:42 -05:00
payment_data . write ( writer ) ? ;
2020-01-09 14:09:25 -05:00
incoming_cltv_expiry . write ( writer ) ? ;
2020-01-01 17:39:51 -05:00
} ,
}
2018-10-26 14:35:50 -04:00
self . incoming_shared_secret . write ( writer ) ? ;
self . payment_hash . write ( writer ) ? ;
self . amt_to_forward . write ( writer ) ? ;
self . outgoing_cltv_value . write ( writer ) ? ;
Ok ( ( ) )
}
}
2020-02-23 23:12:19 -05:00
impl Readable for PendingHTLCInfo {
fn read < R : ::std ::io ::Read > ( reader : & mut R ) -> Result < PendingHTLCInfo , DecodeError > {
2020-01-01 15:56:03 -05:00
Ok ( PendingHTLCInfo {
2020-01-01 17:39:51 -05:00
routing : match Readable ::read ( reader ) ? {
0 u8 = > PendingHTLCRouting ::Forward {
onion_packet : Readable ::read ( reader ) ? ,
short_channel_id : Readable ::read ( reader ) ? ,
} ,
2020-01-01 20:20:42 -05:00
1 u8 = > PendingHTLCRouting ::Receive {
payment_data : Readable ::read ( reader ) ? ,
2020-01-09 14:09:25 -05:00
incoming_cltv_expiry : Readable ::read ( reader ) ? ,
2020-01-01 20:20:42 -05:00
} ,
2020-01-01 17:39:51 -05:00
_ = > return Err ( DecodeError ::InvalidValue ) ,
} ,
2018-10-26 14:35:50 -04:00
incoming_shared_secret : Readable ::read ( reader ) ? ,
payment_hash : Readable ::read ( reader ) ? ,
amt_to_forward : Readable ::read ( reader ) ? ,
outgoing_cltv_value : Readable ::read ( reader ) ? ,
} )
}
}
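// A round-trip sketch of the tagged encoding above; `info` and `buf` are hypothetical
// stand-ins for a `PendingHTLCInfo` value and a `Vec<u8>`-backed `Writer`:
//
//     info.write(&mut buf)?;                         // 0u8/1u8 routing tag, then the fields
//     let decoded = PendingHTLCInfo::read(&mut buf.as_slice())?;
//     // An unrecognized tag byte fails with DecodeError::InvalidValue.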
impl Writeable for HTLCFailureMsg {
fn write < W : Writer > ( & self , writer : & mut W ) -> Result < ( ) , ::std ::io ::Error > {
match self {
& HTLCFailureMsg ::Relay ( ref fail_msg ) = > {
0 u8 . write ( writer ) ? ;
fail_msg . write ( writer ) ? ;
} ,
& HTLCFailureMsg ::Malformed ( ref fail_msg ) = > {
1 u8 . write ( writer ) ? ;
fail_msg . write ( writer ) ? ;
}
}
Ok ( ( ) )
}
}
2020-02-23 23:12:19 -05:00
impl Readable for HTLCFailureMsg {
fn read < R : ::std ::io ::Read > ( reader : & mut R ) -> Result < HTLCFailureMsg , DecodeError > {
match < u8 as Readable > ::read ( reader ) ? {
2018-10-26 14:35:50 -04:00
0 = > Ok ( HTLCFailureMsg ::Relay ( Readable ::read ( reader ) ? ) ) ,
1 = > Ok ( HTLCFailureMsg ::Malformed ( Readable ::read ( reader ) ? ) ) ,
_ = > Err ( DecodeError ::InvalidValue ) ,
}
}
}
impl Writeable for PendingHTLCStatus {
fn write < W : Writer > ( & self , writer : & mut W ) -> Result < ( ) , ::std ::io ::Error > {
match self {
& PendingHTLCStatus ::Forward ( ref forward_info ) = > {
0 u8 . write ( writer ) ? ;
forward_info . write ( writer ) ? ;
} ,
& PendingHTLCStatus ::Fail ( ref fail_msg ) = > {
1 u8 . write ( writer ) ? ;
fail_msg . write ( writer ) ? ;
}
}
Ok ( ( ) )
}
}
2020-02-23 23:12:19 -05:00
impl Readable for PendingHTLCStatus {
fn read < R : ::std ::io ::Read > ( reader : & mut R ) -> Result < PendingHTLCStatus , DecodeError > {
match < u8 as Readable > ::read ( reader ) ? {
2018-10-26 14:35:50 -04:00
0 = > Ok ( PendingHTLCStatus ::Forward ( Readable ::read ( reader ) ? ) ) ,
1 = > Ok ( PendingHTLCStatus ::Fail ( Readable ::read ( reader ) ? ) ) ,
_ = > Err ( DecodeError ::InvalidValue ) ,
}
}
}
impl_writeable! ( HTLCPreviousHopData , 0 , {
short_channel_id ,
2020-10-22 13:32:50 -04:00
outpoint ,
2018-10-26 14:35:50 -04:00
htlc_id ,
incoming_packet_shared_secret
} ) ;
2020-01-01 20:20:42 -05:00
impl_writeable! ( ClaimableHTLC , 0 , {
prev_hop ,
value ,
2020-01-09 14:09:25 -05:00
payment_data ,
cltv_expiry
2020-01-01 20:20:42 -05:00
} ) ;
2018-10-26 14:35:50 -04:00
impl Writeable for HTLCSource {
fn write < W : Writer > ( & self , writer : & mut W ) -> Result < ( ) , ::std ::io ::Error > {
match self {
& HTLCSource ::PreviousHopData ( ref hop_data ) = > {
0 u8 . write ( writer ) ? ;
hop_data . write ( writer ) ? ;
} ,
2020-01-03 19:31:40 -05:00
& HTLCSource ::OutboundRoute { ref path , ref session_priv , ref first_hop_htlc_msat } = > {
2018-10-26 14:35:50 -04:00
1 u8 . write ( writer ) ? ;
2020-01-03 19:31:40 -05:00
path . write ( writer ) ? ;
2018-10-26 14:35:50 -04:00
session_priv . write ( writer ) ? ;
first_hop_htlc_msat . write ( writer ) ? ;
}
}
Ok ( ( ) )
}
}
2020-02-23 23:12:19 -05:00
impl Readable for HTLCSource {
fn read < R : ::std ::io ::Read > ( reader : & mut R ) -> Result < HTLCSource , DecodeError > {
match < u8 as Readable > ::read ( reader ) ? {
2018-10-26 14:35:50 -04:00
0 = > Ok ( HTLCSource ::PreviousHopData ( Readable ::read ( reader ) ? ) ) ,
1 = > Ok ( HTLCSource ::OutboundRoute {
2020-01-03 19:31:40 -05:00
path : Readable ::read ( reader ) ? ,
2018-10-26 14:35:50 -04:00
session_priv : Readable ::read ( reader ) ? ,
first_hop_htlc_msat : Readable ::read ( reader ) ? ,
} ) ,
_ = > Err ( DecodeError ::InvalidValue ) ,
}
}
}
impl Writeable for HTLCFailReason {
fn write < W : Writer > ( & self , writer : & mut W ) -> Result < ( ) , ::std ::io ::Error > {
match self {
2019-11-04 19:09:51 -05:00
& HTLCFailReason ::LightningError { ref err } = > {
2018-10-26 14:35:50 -04:00
0 u8 . write ( writer ) ? ;
err . write ( writer ) ? ;
} ,
& HTLCFailReason ::Reason { ref failure_code , ref data } = > {
1 u8 . write ( writer ) ? ;
failure_code . write ( writer ) ? ;
data . write ( writer ) ? ;
}
}
Ok ( ( ) )
}
}
2020-02-23 23:12:19 -05:00
impl Readable for HTLCFailReason {
fn read < R : ::std ::io ::Read > ( reader : & mut R ) -> Result < HTLCFailReason , DecodeError > {
match < u8 as Readable > ::read ( reader ) ? {
2019-11-04 19:09:51 -05:00
0 = > Ok ( HTLCFailReason ::LightningError { err : Readable ::read ( reader ) ? } ) ,
2018-10-26 14:35:50 -04:00
1 = > Ok ( HTLCFailReason ::Reason {
failure_code : Readable ::read ( reader ) ? ,
data : Readable ::read ( reader ) ? ,
} ) ,
_ = > Err ( DecodeError ::InvalidValue ) ,
}
}
}
2018-12-20 15:36:02 -05:00
impl Writeable for HTLCForwardInfo {
fn write < W : Writer > ( & self , writer : & mut W ) -> Result < ( ) , ::std ::io ::Error > {
match self {
2020-10-22 13:32:50 -04:00
& HTLCForwardInfo ::AddHTLC { ref prev_short_channel_id , ref prev_funding_outpoint , ref prev_htlc_id , ref forward_info } = > {
2018-12-20 15:36:02 -05:00
0 u8 . write ( writer ) ? ;
prev_short_channel_id . write ( writer ) ? ;
2020-10-22 13:32:50 -04:00
prev_funding_outpoint . write ( writer ) ? ;
2018-12-20 15:36:02 -05:00
prev_htlc_id . write ( writer ) ? ;
forward_info . write ( writer ) ? ;
} ,
2018-12-20 16:15:07 -05:00
& HTLCForwardInfo ::FailHTLC { ref htlc_id , ref err_packet } = > {
1 u8 . write ( writer ) ? ;
htlc_id . write ( writer ) ? ;
err_packet . write ( writer ) ? ;
} ,
2018-12-20 15:36:02 -05:00
}
Ok ( ( ) )
}
}
2020-02-23 23:12:19 -05:00
impl Readable for HTLCForwardInfo {
fn read < R : ::std ::io ::Read > ( reader : & mut R ) -> Result < HTLCForwardInfo , DecodeError > {
match < u8 as Readable > ::read ( reader ) ? {
2018-12-20 15:36:02 -05:00
0 = > Ok ( HTLCForwardInfo ::AddHTLC {
prev_short_channel_id : Readable ::read ( reader ) ? ,
2020-10-22 13:32:50 -04:00
prev_funding_outpoint : Readable ::read ( reader ) ? ,
2018-12-20 15:36:02 -05:00
prev_htlc_id : Readable ::read ( reader ) ? ,
forward_info : Readable ::read ( reader ) ? ,
} ) ,
2018-12-20 16:15:07 -05:00
1 = > Ok ( HTLCForwardInfo ::FailHTLC {
htlc_id : Readable ::read ( reader ) ? ,
err_packet : Readable ::read ( reader ) ? ,
} ) ,
2018-12-20 15:36:02 -05:00
_ = > Err ( DecodeError ::InvalidValue ) ,
}
}
}
2018-10-26 14:35:50 -04:00
2020-11-25 12:23:37 -05:00
impl < ChanSigner : ChannelKeys , M : Deref , T : Deref , K : Deref , F : Deref , L : Deref > Writeable for ChannelManager < ChanSigner , M , T , K , F , L >
2020-07-20 17:03:52 -07:00
where M ::Target : chain ::Watch < Keys = ChanSigner > ,
2020-02-20 14:14:12 -05:00
T ::Target : BroadcasterInterface ,
2020-02-26 16:00:26 -05:00
K ::Target : KeysInterface < ChanKeySigner = ChanSigner > ,
2020-02-27 11:33:03 -05:00
F ::Target : FeeEstimator ,
2020-03-02 12:55:53 -05:00
L ::Target : Logger ,
2020-02-20 14:14:12 -05:00
{
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		// Take the consistency lock exclusively so we serialize a stable snapshot.
		let _consistency_lock = self.total_consistency_lock.write().unwrap();

		writer.write_all(&[SERIALIZATION_VERSION; 1])?;
		writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;

		self.genesis_hash.write(writer)?;
		(self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
		self.last_block_hash.lock().unwrap().write(writer)?;

		// Channels for which funding was never initiated are skipped - they have no on-chain
		// state to track and will simply need to be reopened after a restart.
		let channel_state = self.channel_state.lock().unwrap();
		let mut unfunded_channels = 0;
		for (_, channel) in channel_state.by_id.iter() {
			if !channel.is_funding_initiated() {
				unfunded_channels += 1;
			}
		}
		((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
		for (_, channel) in channel_state.by_id.iter() {
			if channel.is_funding_initiated() {
				channel.write(writer)?;
			}
		}

		(channel_state.forward_htlcs.len() as u64).write(writer)?;
		for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() {
			short_channel_id.write(writer)?;
			(pending_forwards.len() as u64).write(writer)?;
			for forward in pending_forwards {
				forward.write(writer)?;
			}
		}

		(channel_state.claimable_htlcs.len() as u64).write(writer)?;
		for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
			payment_hash.write(writer)?;
			(previous_hops.len() as u64).write(writer)?;
			for htlc in previous_hops.iter() {
				htlc.write(writer)?;
			}
		}

		let per_peer_state = self.per_peer_state.write().unwrap();
		(per_peer_state.len() as u64).write(writer)?;
		for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
			peer_pubkey.write(writer)?;
			let peer_state = peer_state_mutex.lock().unwrap();
			peer_state.latest_features.write(writer)?;
		}

		let events = self.pending_events.lock().unwrap();
		(events.len() as u64).write(writer)?;
		for event in events.iter() {
			event.write(writer)?;
		}

		(self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;

		Ok(())
	}
}
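
// A minimal persistence sketch built on the Writeable impl above. `manager` and the output
// path are hypothetical, and `Writeable::encode` is the util::ser helper which collects the
// serialization into a Vec<u8>. A real deployment should write atomically (e.g. write to a
// temporary file and rename) and re-persist whenever the manager's state changes:
//
//     let bytes = manager.encode();
//     std::fs::write("channel_manager.bin", &bytes)?;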
/// Arguments for the creation of a ChannelManager that are not deserialized.
///
/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
/// is:
/// 1) Deserialize all stored ChannelMonitors.
/// 2) Deserialize the ChannelManager by filling in this struct and calling <(BlockHash,
///    ChannelManager)>::read(reader, args) (see the sketch after this list).
///    This may result in closing some Channels if the ChannelMonitor is newer than the stored
///    ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
/// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
/// ChannelMonitor::get_outputs_to_watch() and ChannelMonitor::get_funding_txo().
/// 4) Reconnect blocks on your ChannelMonitors.
/// 5) Move the ChannelMonitors into your local chain::Watch.
/// 6) Disconnect/connect blocks on the ChannelManager.
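///
/// A minimal sketch of step (2). The `monitors`, `reader`, `keys_manager`, `fee_estimator`,
/// `chain_monitor`, `tx_broadcaster`, and `logger` values are placeholders you must construct
/// yourself; they are not part of this API:
///
/// ```ignore
/// let read_args = ChannelManagerReadArgs::new(keys_manager, fee_estimator, chain_monitor,
/// 	tx_broadcaster, logger, UserConfig::default(), monitors);
/// let (last_block_hash, channel_manager) =
/// 	<(BlockHash, ChannelManager<InMemoryChannelKeys, _, _, _, _, _>)>::read(&mut reader, read_args)?;
/// ```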
pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
	where M::Target: chain::Watch<Keys=ChanSigner>,
	      T::Target: BroadcasterInterface,
	      K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
{
	/// The keys provider which will give us relevant keys. Some keys will be loaded during
	/// deserialization and KeysInterface::read_chan_signer will be used to read per-Channel
	/// signing data.
	pub keys_manager: K,

	/// The fee_estimator for use in the ChannelManager in the future.
	///
	/// No calls to the FeeEstimator will be made during deserialization.
	pub fee_estimator: F,

	/// The chain::Watch for use in the ChannelManager in the future.
	///
	/// No calls to the chain::Watch will be made during deserialization. It is assumed that
	/// you have deserialized ChannelMonitors separately and will add them to your
	/// chain::Watch after deserializing this ChannelManager.
	pub chain_monitor: M,

	/// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
	/// used to broadcast the latest local commitment transactions of channels which must be
	/// force-closed during deserialization.
	pub tx_broadcaster: T,

	/// The Logger for use in the ChannelManager and which may be used to log information during
	/// deserialization.
	pub logger: L,

	/// Default settings used for new channels. Any existing channels will continue to use the
	/// runtime settings which were stored when the ChannelManager was serialized.
	pub default_config: UserConfig,

	/// A map from channel funding outpoints to ChannelMonitors for those channels (ie
	/// value.get_funding_txo() should be the key).
	///
	/// If a monitor is inconsistent with the channel state during deserialization the channel will
	/// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
	/// is true for missing channels as well. If there is a monitor missing for which we find
	/// channel data Err(DecodeError::InvalidValue) will be returned.
	///
	/// In such cases the latest local transactions will be sent to the tx_broadcaster included in
	/// this struct.
	///
	/// (C-not exported) because we have no HashMap bindings
	pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<ChanSigner>>,
}

impl<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
		ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>
	where M::Target: chain::Watch<Keys=ChanSigner>,
	      T::Target: BroadcasterInterface,
	      K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
{
	/// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
	/// HashMap for you. This is primarily useful for C bindings where it is not practical to
	/// populate a HashMap directly from C.
	pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig,
			mut channel_monitors: Vec<&'a mut ChannelMonitor<ChanSigner>>) -> Self {
		Self {
			keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger, default_config,
			channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
		}
	}
}

// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
// SimpleArcChannelManager type:
impl<'a, ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
	ReadableArgs<ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<ChanSigner, M, T, K, F, L>>)
	where M::Target: chain::Watch<Keys=ChanSigner>,
	      T::Target: BroadcasterInterface,
	      K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
{
	fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>) -> Result<Self, DecodeError> {
		let (blockhash, chan_manager) = <(BlockHash, ChannelManager<ChanSigner, M, T, K, F, L>)>::read(reader, args)?;
		Ok((blockhash, Arc::new(chan_manager)))
	}
}

impl<'a, ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
	ReadableArgs<ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>> for (BlockHash, ChannelManager<ChanSigner, M, T, K, F, L>)
	where M::Target: chain::Watch<Keys=ChanSigner>,
	      T::Target: BroadcasterInterface,
	      K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
{
	fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>) -> Result<Self, DecodeError> {
		let _ver: u8 = Readable::read(reader)?;
		let min_ver: u8 = Readable::read(reader)?;
		if min_ver > SERIALIZATION_VERSION {
			return Err(DecodeError::UnknownVersion);
		}

		let genesis_hash: BlockHash = Readable::read(reader)?;
		let latest_block_height: u32 = Readable::read(reader)?;
		let last_block_hash: BlockHash = Readable::read(reader)?;

		let mut failed_htlcs = Vec::new();

		let channel_count: u64 = Readable::read(reader)?;
		let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
		let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
		let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
		for _ in 0..channel_count {
			let mut channel: Channel<ChanSigner> = Channel::read(reader, &args.keys_manager)?;
			if channel.last_block_connected != Default::default() && channel.last_block_connected != last_block_hash {
				return Err(DecodeError::InvalidValue);
			}

			let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
			funding_txo_set.insert(funding_txo.clone());
			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
				if channel.get_cur_holder_commitment_transaction_number() < monitor.get_cur_holder_commitment_number() ||
						channel.get_revoked_counterparty_commitment_transaction_number() < monitor.get_min_seen_secret() ||
						channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
						channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
					// If the channel is ahead of the monitor, return InvalidValue:
					return Err(DecodeError::InvalidValue);
				} else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
						channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
						channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
						channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
					// But if the channel is behind the monitor, close the channel:
					let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
					failed_htlcs.append(&mut new_failed_htlcs);
					monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
				} else {
					if let Some(short_channel_id) = channel.get_short_channel_id() {
						short_to_id.insert(short_channel_id, channel.channel_id());
					}
					by_id.insert(channel.channel_id(), channel);
				}
			} else {
				return Err(DecodeError::InvalidValue);
			}
		}

		// A monitor without any corresponding channel data must belong to a channel which was
		// closed; broadcast its latest holder commitment transaction so that any funds it
		// controls can be claimed on-chain.
		for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
			if !funding_txo_set.contains(funding_txo) {
				monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
			}
		}

		const MAX_ALLOC_SIZE: usize = 1024 * 64;
		let forward_htlcs_count: u64 = Readable::read(reader)?;
		let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
		for _ in 0..forward_htlcs_count {
			let short_channel_id = Readable::read(reader)?;
			let pending_forwards_count: u64 = Readable::read(reader)?;
			let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, MAX_ALLOC_SIZE/mem::size_of::<HTLCForwardInfo>()));
			for _ in 0..pending_forwards_count {
				pending_forwards.push(Readable::read(reader)?);
			}
			forward_htlcs.insert(short_channel_id, pending_forwards);
		}

		let claimable_htlcs_count: u64 = Readable::read(reader)?;
		let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
		for _ in 0..claimable_htlcs_count {
			let payment_hash = Readable::read(reader)?;
			let previous_hops_len: u64 = Readable::read(reader)?;
			let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
			for _ in 0..previous_hops_len {
				previous_hops.push(Readable::read(reader)?);
			}
			claimable_htlcs.insert(payment_hash, previous_hops);
		}

		let peer_count: u64 = Readable::read(reader)?;
		let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState>)>()));
		for _ in 0..peer_count {
			let peer_pubkey = Readable::read(reader)?;
			let peer_state = PeerState {
				latest_features: Readable::read(reader)?,
			};
			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
		}

		let event_count: u64 = Readable::read(reader)?;
		let mut pending_events_read: Vec<events::Event> = Vec::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<events::Event>()));
		for _ in 0..event_count {
			match MaybeReadable::read(reader)? {
				Some(event) => pending_events_read.push(event),
				None => continue,
			}
		}

		let last_node_announcement_serial: u32 = Readable::read(reader)?;

		let channel_manager = ChannelManager {
			genesis_hash,
			fee_estimator: args.fee_estimator,
			chain_monitor: args.chain_monitor,
			tx_broadcaster: args.tx_broadcaster,

			latest_block_height: AtomicUsize::new(latest_block_height as usize),
			last_block_hash: Mutex::new(last_block_hash),
			secp_ctx: Secp256k1::new(),

			channel_state: Mutex::new(ChannelHolder {
				by_id,
				short_to_id,
				forward_htlcs,
				claimable_htlcs,
				pending_msg_events: Vec::new(),
			}),
			our_network_key: args.keys_manager.get_node_secret(),

			last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),

			per_peer_state: RwLock::new(per_peer_state),

			pending_events: Mutex::new(pending_events_read),
			total_consistency_lock: RwLock::new(()),
			persistence_notifier: PersistenceNotifier::new(),

			keys_manager: args.keys_manager,
			logger: args.logger,
			default_configuration: args.default_config,
		};

		for htlc_source in failed_htlcs.drain(..) {
			channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
		}

		//TODO: Broadcast channel update for closed channels, but only after we've made a
		//connection or two.

		Ok((last_block_hash.clone(), channel_manager))
	}
}

#[cfg(test)]
mod tests {
	use ln::channelmanager::PersistenceNotifier;
	use std::sync::Arc;
	use std::sync::atomic::{AtomicBool, Ordering};
	use std::thread;
	use std::time::Duration;

	#[test]
	fn test_wait_timeout() {
		let persistence_notifier = Arc::new(PersistenceNotifier::new());
		let thread_notifier = Arc::clone(&persistence_notifier);

		let exit_thread = Arc::new(AtomicBool::new(false));
		let exit_thread_clone = exit_thread.clone();
		thread::spawn(move || {
			loop {
				let &(ref persist_mtx, ref cnd) = &thread_notifier.persistence_lock;
				let mut persistence_lock = persist_mtx.lock().unwrap();
				*persistence_lock = true;
				cnd.notify_all();

				if exit_thread_clone.load(Ordering::SeqCst) {
					break
				}
			}
		});

		// Check that we can block indefinitely until updates are available.
		let _ = persistence_notifier.wait();

		// Check that the PersistenceNotifier will return after the given duration if updates are
		// available.
		loop {
			if persistence_notifier.wait_timeout(Duration::from_millis(100)) {
				break
			}
		}

		exit_thread.store(true, Ordering::SeqCst);

		// Check that the PersistenceNotifier will return after the given duration even if no
		// updates are available.
		loop {
			if !persistence_notifier.wait_timeout(Duration::from_millis(100)) {
				break
			}
		}
	}
}