Merge pull request #349 from ariard/2019-07-data_loss

Implement option_data_loss_protect on both sides
Matt Corallo 2019-08-06 21:12:57 +00:00 committed by GitHub
commit 56f16eaa4d
6 changed files with 272 additions and 31 deletions
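
For context: BOLT 2's option_data_loss_protect extends channel_reestablish with two fields so that, on reconnection, each peer proves how much of the counterparty's state it has seen, and a peer which has lost data learns the point it needs to later claim its to_remote output. A minimal standalone sketch of the two fields as this PR names them (the real definition lives in ln::msgs; the 33-byte array here is an illustrative stand-in for a secp256k1 PublicKey):

pub struct DataLossProtect {
    // The last per-commitment secret the sender received from us, proving
    // how far along our channel state the sender has seen.
    pub your_last_per_commitment_secret: [u8; 32],
    // The sender's current per-commitment point. A peer which detects it has
    // fallen behind can use this to claim its to_remote output once the
    // sender's commitment transaction reaches the chain.
    pub my_current_per_commitment_point: [u8; 33],
}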

src/ln/channel.rs

@@ -16,7 +16,7 @@ use secp256k1::{Secp256k1,Signature};
use secp256k1;
use ln::msgs;
-use ln::msgs::{DecodeError, OptionalField, LocalFeatures};
+use ln::msgs::{DecodeError, OptionalField, LocalFeatures, DataLossProtect};
use ln::channelmonitor::ChannelMonitor;
use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
use ln::chan_utils::{TxCreationKeys,HTLCOutputInCommitment,HTLC_SUCCESS_TX_WEIGHT,HTLC_TIMEOUT_TX_WEIGHT};
@@ -32,7 +32,7 @@ use util::config::{UserConfig,ChannelConfig};
use std;
use std::default::Default;
-use std::{cmp,mem};
+use std::{cmp,mem,fmt};
use std::sync::{Arc};
#[cfg(test)]
@@ -366,10 +366,23 @@ pub const OFFERED_HTLC_SCRIPT_WEIGHT: usize = 133;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
-#[derive(Debug)]
pub(super) enum ChannelError {
Ignore(&'static str),
Close(&'static str),
CloseDelayBroadcast {
msg: &'static str,
update: Option<ChannelMonitor>
},
}
impl fmt::Debug for ChannelError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&ChannelError::Ignore(e) => write!(f, "Ignore : {}", e),
&ChannelError::Close(e) => write!(f, "Close : {}", e),
&ChannelError::CloseDelayBroadcast { msg, .. } => write!(f, "CloseDelayBroadcast : {}", msg)
}
}
}
macro_rules! secp_check {
@@ -2499,6 +2512,22 @@ impl Channel {
return Err(ChannelError::Close("Peer sent a garbage channel_reestablish"));
}
if msg.next_remote_commitment_number > 0 {
match msg.data_loss_protect {
OptionalField::Present(ref data_loss) => {
if chan_utils::build_commitment_secret(self.local_keys.commitment_seed, INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1) != data_loss.your_last_per_commitment_secret {
return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided"));
}
if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number {
self.channel_monitor.provide_rescue_remote_commitment_tx_info(data_loss.my_current_per_commitment_point);
return Err(ChannelError::CloseDelayBroadcast { msg: "We have fallen behind - we have received proof that if we broadcast the remote side is going to claim our funds - we can't do any automated broadcasting", update: Some(self.channel_monitor.clone())
});
}
},
OptionalField::Absent => {}
}
}
// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
// remaining cases either succeed or ErrorMessage-fail).
self.channel_state &= !(ChannelState::PeerDisconnected as u32);
@@ -2575,7 +2604,7 @@ impl Channel {
// now!
match self.free_holding_cell_htlcs() {
Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
-Err(ChannelError::Ignore(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
+Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast { .. }) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), self.resend_order.clone(), shutdown_msg)),
Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg)),
}
@@ -3255,6 +3284,20 @@ impl Channel {
pub fn get_channel_reestablish(&self) -> msgs::ChannelReestablish {
assert_eq!(self.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
assert_ne!(self.cur_remote_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
let data_loss_protect = if self.cur_remote_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
let remote_last_secret = self.channel_monitor.get_secret(self.cur_remote_commitment_transaction_number + 2).unwrap();
log_trace!(self, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret));
OptionalField::Present(DataLossProtect {
your_last_per_commitment_secret: remote_last_secret,
my_current_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number + 1))
})
} else {
log_debug!(self, "We haven't seen any revoked secret yet; if this channel has already been updated it means we have fallen behind - you should wait for the other peer to close");
OptionalField::Present(DataLossProtect {
your_last_per_commitment_secret: [0;32],
my_current_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number))
})
};
msgs::ChannelReestablish {
channel_id: self.channel_id(),
// The protocol has two different commitment number concepts - the "commitment
@@ -3275,7 +3318,7 @@ impl Channel {
// dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
// overflow here.
next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_remote_commitment_transaction_number - 1,
-data_loss_protect: OptionalField::Absent,
+data_loss_protect,
}
}
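
The secret verified and produced above comes from BOLT 3's per-commitment secret derivation. A self-contained sketch of what chan_utils::build_commitment_secret computes, assuming the sha2 crate (rust-lightning itself uses bitcoin_hashes):

use sha2::{Digest, Sha256};

// BOLT 3 derivation: walk the 48 index bits from high to low; wherever a bit
// is set in `idx`, flip that bit in the running value and hash it. This is
// what makes a single 32-byte your_last_per_commitment_secret a compact proof
// of how many revocations the sender has seen.
fn build_commitment_secret(commitment_seed: [u8; 32], idx: u64) -> [u8; 32] {
    let mut res = commitment_seed;
    for i in 0..48 {
        let bitpos = 47 - i;
        if idx & (1u64 << bitpos) != 0 {
            res[bitpos / 8] ^= 1 << (bitpos & 7);
            let hash = Sha256::digest(&res);
            res.copy_from_slice(hash.as_slice());
        }
    }
    res
}

fn main() {
    // handle_channel_reestablish above recomputes the secret at index
    // INITIAL_COMMITMENT_NUMBER - next_remote_commitment_number + 1 and
    // compares it against the peer's claimed your_last_per_commitment_secret.
    let seed = [0x42u8; 32];
    assert_ne!(build_commitment_secret(seed, (1 << 48) - 1), seed);
}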

src/ln/channelmanager.rs

@@ -208,6 +208,15 @@ impl MsgHandleErrInternal {
},
}),
},
ChannelError::CloseDelayBroadcast { msg, .. } => HandleError {
err: msg,
action: Some(msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage {
channel_id,
data: msg.to_string()
},
}),
},
},
shutdown_finish: None,
}
@@ -447,6 +456,7 @@ macro_rules! break_chan_entry {
}
break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
},
Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("CloseDelayBroadcast is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
}
}
}
@@ -466,6 +476,31 @@ macro_rules! try_chan_entry {
}
return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
},
Err(ChannelError::CloseDelayBroadcast { msg, update }) => {
log_error!($self, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
let (channel_id, mut chan) = $entry.remove_entry();
if let Some(short_id) = chan.get_short_channel_id() {
$channel_state.short_to_id.remove(&short_id);
}
if let Some(update) = update {
if let Err(e) = $self.monitor.add_update_monitor(update.get_funding_txo().unwrap(), update) {
match e {
// The upstream channel is dead, but we still want to fail HTLCs backward to protect
// downstream channels. In case of PermanentFailure we won't be able to claim back the
// to_remote output on the remote commitment transaction, but that makes no difference
// here - we care about the HTLC circuit, not on-chain funds.
ChannelMonitorUpdateErr::PermanentFailure => {},
ChannelMonitorUpdateErr::TemporaryFailure => {},
}
}
}
let mut shutdown_res = chan.force_shutdown();
if shutdown_res.0.len() >= 1 {
log_error!($self, "You have a toxic local commitment transaction {} avaible in channel monitor, read comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of manual action to take", shutdown_res.0[0].txid());
}
shutdown_res.0.clear();
return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
}
}
}
}

src/ln/channelmonitor.rs

@@ -456,6 +456,10 @@ pub struct ChannelMonitor {
payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
destination_script: Script,
// Thanks to data loss protection, we may be able to claim our non-HTLC funds back.
// This is the script we have to look for in commitment transaction outputs (plus
// the key to spend them), so we need to scan every commitment transaction for it.
to_remote_rescue: Option<(Script, SecretKey)>,
// Used to track outpoint in the process of being claimed by our transactions. We need to scan all transactions
// for inputs spending this. If height timer (u32) is expired and claim tx hasn't reached enough confirmations
@@ -535,6 +539,7 @@ impl PartialEq for ChannelMonitor {
self.current_local_signed_commitment_tx != other.current_local_signed_commitment_tx ||
self.payment_preimages != other.payment_preimages ||
self.destination_script != other.destination_script ||
self.to_remote_rescue != other.to_remote_rescue ||
self.our_claim_txn_waiting_first_conf != other.our_claim_txn_waiting_first_conf ||
self.onchain_events_waiting_threshold_conf != other.onchain_events_waiting_threshold_conf
{
@@ -585,6 +590,7 @@ impl ChannelMonitor {
payment_preimages: HashMap::new(),
destination_script: destination_script,
to_remote_rescue: None,
our_claim_txn_waiting_first_conf: HashMap::new(),
@@ -763,6 +769,22 @@ impl ChannelMonitor {
}
}
pub(super) fn provide_rescue_remote_commitment_tx_info(&mut self, their_revocation_point: PublicKey) {
match self.key_storage {
Storage::Local { ref payment_base_key, .. } => {
if let Ok(payment_key) = chan_utils::derive_public_key(&self.secp_ctx, &their_revocation_point, &PublicKey::from_secret_key(&self.secp_ctx, &payment_base_key)) {
let to_remote_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0)
.push_slice(&Hash160::hash(&payment_key.serialize())[..])
.into_script();
if let Ok(to_remote_key) = chan_utils::derive_private_key(&self.secp_ctx, &their_revocation_point, &payment_base_key) {
self.to_remote_rescue = Some((to_remote_script, to_remote_key));
}
}
},
Storage::Watchtower { .. } => {}
}
}
/// Informs this monitor of the latest local (ie broadcastable) commitment transaction. The
/// monitor watches for timeouts and may broadcast it if we approach such a timeout. Thus, it
is important that any clones of this channel monitor (including remote clones) be kept
@@ -852,6 +874,7 @@ impl ChannelMonitor {
self.current_local_signed_commitment_tx = Some(local_tx);
}
self.payment_preimages = other.payment_preimages;
self.to_remote_rescue = other.to_remote_rescue;
}
self.current_remote_commitment_number = cmp::min(self.current_remote_commitment_number, other.current_remote_commitment_number);
@@ -1105,6 +1128,13 @@ impl ChannelMonitor {
self.last_block_hash.write(writer)?;
self.destination_script.write(writer)?;
if let Some((ref to_remote_script, ref local_key)) = self.to_remote_rescue {
writer.write_all(&[1; 1])?;
to_remote_script.write(writer)?;
local_key.write(writer)?;
} else {
writer.write_all(&[0; 1])?;
}
writer.write_all(&byte_utils::be64_to_array(self.our_claim_txn_waiting_first_conf.len() as u64))?;
for (ref outpoint, claim_tx_data) in self.our_claim_txn_waiting_first_conf.iter() {
@@ -1733,6 +1763,16 @@ impl ChannelMonitor {
txn_to_broadcast.push(spend_tx);
}
}
} else if let Some((ref to_remote_rescue, ref local_key)) = self.to_remote_rescue {
for (idx, outp) in tx.output.iter().enumerate() {
if to_remote_rescue == &outp.script_pubkey {
spendable_outputs.push(SpendableOutputDescriptor::DynamicOutputP2WPKH {
outpoint: BitcoinOutPoint { txid: commitment_txid, vout: idx as u32 },
key: local_key.clone(),
output: outp.clone(),
});
}
}
}
(txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs)
@@ -2048,9 +2088,16 @@ impl ChannelMonitor {
None
}
-/// Used by ChannelManager deserialization to broadcast the latest local state if it's copy of
-/// the Channel was out-of-date.
-pub(super) fn get_latest_local_commitment_txn(&self) -> Vec<Transaction> {
+/// Used by ChannelManager deserialization to broadcast the latest local state if its copy of
+/// the Channel was out-of-date. You may also use this to get your only broadcastable (but
+/// toxic!) local commitment transactions if you have fallen behind, i.e. after receiving a
+/// channel_reestablish proving that the remote side knows a higher revocation secret than the
+/// local commitment number we are aware of. Broadcasting these transactions is UNSAFE, as it
+/// allows the remote side to punish you. Nevertheless, you may want to broadcast them to get
+/// your funds back if the remote side fails to close the channel with its higher commitment
+/// transaction after a substantial amount of time (a month or even a year). It is best to
+/// contact the other node operator out-of-band and coordinate with them, if that option is
+/// available to you. In any case, the choice is up to the user.
+pub fn get_latest_local_commitment_txn(&self) -> Vec<Transaction> {
if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
let mut res = vec![local_tx.tx.clone()];
match self.key_storage {
@@ -2088,19 +2135,21 @@ impl ChannelMonitor {
}
};
if funding_txo.is_none() || (prevout.txid == funding_txo.as_ref().unwrap().0.txid && prevout.vout == funding_txo.as_ref().unwrap().0.index as u32) {
-let (remote_txn, new_outputs, mut spendable_output) = self.check_spend_remote_transaction(tx, height, fee_estimator);
-txn = remote_txn;
-spendable_outputs.append(&mut spendable_output);
-if !new_outputs.1.is_empty() {
-	watch_outputs.push(new_outputs);
-}
-if txn.is_empty() {
-	let (local_txn, mut spendable_output, new_outputs) = self.check_spend_local_transaction(tx, height);
-	spendable_outputs.append(&mut spendable_output);
-	txn = local_txn;
-	if !new_outputs.1.is_empty() {
-		watch_outputs.push(new_outputs);
-	}
-}
+if (tx.input[0].sequence >> 8*3) as u8 == 0x80 && (tx.lock_time >> 8*3) as u8 == 0x20 {
+	let (remote_txn, new_outputs, mut spendable_output) = self.check_spend_remote_transaction(tx, height, fee_estimator);
+	txn = remote_txn;
+	spendable_outputs.append(&mut spendable_output);
+	if !new_outputs.1.is_empty() {
+		watch_outputs.push(new_outputs);
+	}
+	if txn.is_empty() {
+		let (local_txn, mut spendable_output, new_outputs) = self.check_spend_local_transaction(tx, height);
+		spendable_outputs.append(&mut spendable_output);
+		txn = local_txn;
+		if !new_outputs.1.is_empty() {
+			watch_outputs.push(new_outputs);
+		}
+	}
+}
if !funding_txo.is_none() && txn.is_empty() {
if let Some(spendable_output) = self.check_spend_closing_transaction(tx) {
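
The new outer guard keys off how BOLT 3 tags commitment transactions: the top byte of the input's nSequence is 0x80 and the top byte of nLockTime is 0x20, with the remaining 48 bits carrying the obscured commitment number. A small standalone sketch of that arithmetic (the helper name is illustrative):

// BOLT 3: the upper 24 bits of the obscured commitment number live in
// nSequence and the lower 24 bits in nLockTime, behind 0x80/0x20 marker bytes.
fn looks_like_commitment_tx(sequence: u32, lock_time: u32) -> bool {
    (sequence >> 8 * 3) as u8 == 0x80 && (lock_time >> 8 * 3) as u8 == 0x20
}

fn main() {
    let obscured: u64 = 0x2a2a_2a2a_2a2a; // example 48-bit obscured number
    let sequence = (0x80u32 << 8 * 3) | ((obscured >> 8 * 3) as u32 & 0x00ff_ffff);
    let lock_time = (0x20u32 << 8 * 3) | (obscured as u32 & 0x00ff_ffff);
    assert!(looks_like_commitment_tx(sequence, lock_time));
    // Spends which cannot be commitment transactions are skipped, so the
    // remote/local commitment checks (including the to_remote rescue scan)
    // don't run on every transaction.
}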
@@ -2627,6 +2676,15 @@ impl<R: ::std::io::Read> ReadableArgs<R, Arc<Logger>> for (Sha256dHash, ChannelM
let last_block_hash: Sha256dHash = Readable::read(reader)?;
let destination_script = Readable::read(reader)?;
let to_remote_rescue = match <u8 as Readable<R>>::read(reader)? {
0 => None,
1 => {
let to_remote_script = Readable::read(reader)?;
let local_key = Readable::read(reader)?;
Some((to_remote_script, local_key))
}
_ => return Err(DecodeError::InvalidValue),
};
let our_claim_txn_waiting_first_conf_len: u64 = Readable::read(reader)?;
let mut our_claim_txn_waiting_first_conf = HashMap::with_capacity(cmp::min(our_claim_txn_waiting_first_conf_len as usize, MAX_ALLOC_SIZE / 128));
@@ -2736,6 +2794,7 @@ impl<R: ::std::io::Read> ReadableArgs<R, Arc<Logger>> for (Sha256dHash, ChannelM
payment_preimages,
destination_script,
to_remote_rescue,
our_claim_txn_waiting_first_conf,
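
With get_latest_local_commitment_txn now pub, a hedged user-side sketch of the last-resort flow its documentation warns about (the function name here is hypothetical; BroadcasterInterface is the existing chain::chaininterface trait):

use chain::chaininterface::BroadcasterInterface;
use ln::channelmonitor::ChannelMonitor;

// Last resort only: these transactions are revoked, so the remote side can
// take everything via the revocation path the moment they hit the chain. Per
// the doc comment above, prefer coordinating out-of-band first, and fall back
// to this only after the peer has failed to close for a very long time.
fn desperate_manual_close<B: BroadcasterInterface>(monitor: &ChannelMonitor, broadcaster: &B) {
    for tx in monitor.get_latest_local_commitment_txn() {
        broadcaster.broadcast_transaction(&tx);
    }
}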

src/ln/functional_tests.rs

@@ -3,8 +3,9 @@
//! claim outputs on-chain.
use chain::transaction::OutPoint;
-use chain::chaininterface::{ChainListener, ChainWatchInterface};
+use chain::chaininterface::{ChainListener, ChainWatchInterface, ChainWatchInterfaceUtil};
use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor, KeysManager};
use chain::keysinterface;
use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT};
use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ManyChannelMonitor, ANTI_REORG_DELAY};
@@ -18,6 +19,7 @@ use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsPro
use util::errors::APIError;
use util::ser::{Writeable, ReadableArgs};
use util::config::UserConfig;
use util::logger::Logger;
use bitcoin::util::hash::BitcoinHash;
use bitcoin_hashes::sha256d::Hash as Sha256dHash;
@@ -39,7 +41,7 @@ use secp256k1::key::{PublicKey,SecretKey};
use std::collections::{BTreeSet, HashMap, HashSet};
use std::default::Default;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
use std::sync::atomic::Ordering;
use std::mem;
@@ -5945,3 +5947,113 @@ fn test_user_configurable_csv_delay() {
}
} else { assert!(false); }
}
#[test]
fn test_data_loss_protect() {
// We want to be sure that:
// * we don't broadcast our local commitment tx if we have fallen behind
// * we close the channel upon detecting that the other side has fallen behind
// * we are able to claim our own outputs thanks to the remote my_current_per_commitment_point
let mut nodes = create_network(2, &[None, None]);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
// Cache node A state before any channel update
let previous_node_state = nodes[0].node.encode();
let mut previous_chan_monitor_state = test_utils::TestVecWriter(Vec::new());
nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chan_monitor_state).unwrap();
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
// Restore node A from previous state
let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::with_id(format!("node {}", 0)));
let chan_monitor = <(Sha256dHash, ChannelMonitor)>::read(&mut ::std::io::Cursor::new(previous_chan_monitor_state.0), Arc::clone(&logger)).unwrap().1;
let chain_monitor = Arc::new(ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
let monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), feeest.clone()));
let mut channel_monitors = HashMap::new();
channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &chan_monitor);
let node_state_0 = <(Sha256dHash, ChannelManager)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
keys_manager: Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::clone(&logger), 42, 21)),
fee_estimator: feeest.clone(),
monitor: monitor.clone(),
chain_monitor: chain_monitor.clone(),
logger: Arc::clone(&logger),
tx_broadcaster,
default_config: UserConfig::new(),
channel_monitors: &channel_monitors
}).unwrap().1;
nodes[0].node = Arc::new(node_state_0);
assert!(monitor.add_update_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor.clone()).is_ok());
nodes[0].chan_monitor = monitor;
nodes[0].chain_monitor = chain_monitor;
check_added_monitors!(nodes[0], 1);
nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
// Check we update the monitor after learning of per_commitment_point from B
if let Err(err) = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]) {
if let Some(error) = err.action {
match error {
ErrorAction::SendErrorMessage { msg } => {
assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
},
_ => panic!("Unexpected event!"),
}
} else { assert!(false); }
} else { assert!(false); }
check_added_monitors!(nodes[0], 1);
{
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 0);
}
let mut reestablish_1 = Vec::with_capacity(1);
for msg in nodes[0].node.get_and_clear_pending_msg_events() {
if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
reestablish_1.push(msg.clone());
} else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
} else {
panic!("Unexpected event")
}
}
// Check we close the channel upon detecting that A has fallen behind
if let Err(err) = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]) {
if let Some(error) = err.action {
match error {
ErrorAction::SendErrorMessage { msg } => {
assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); },
_ => panic!("Unexpected event!"),
}
} else { assert!(false); }
} else { assert!(false); }
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexpected event"),
}
// Check A is able to claim its to_remote output
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3.clone());
assert_eq!(node_txn[0].output.len(), 2);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()]}, 1);
let spend_txn = check_spendable_outputs!(nodes[0], 1);
assert_eq!(spend_txn.len(), 1);
check_spends!(spend_txn[0], node_txn[0].clone());
}
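
For reference, the output rescued at the end of this test surfaces through the normal spendable-outputs path as a DynamicOutputP2WPKH descriptor, sweepable with an ordinary BIP 143 P2WPKH signature. A sketch of inspecting one (the function name is hypothetical; sweep-transaction construction is elided):

use chain::keysinterface::SpendableOutputDescriptor;

fn log_rescued_output(desc: &SpendableOutputDescriptor) {
    if let SpendableOutputDescriptor::DynamicOutputP2WPKH { ref outpoint, ref output, .. } = *desc {
        // The descriptor's `key` (elided here) signs a P2WPKH spend of
        // `outpoint` for `output.value` satoshis.
        println!("rescued {} sats at {}:{}", output.value, outpoint.txid, outpoint.vout);
    }
}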

src/ln/msgs.rs

@@ -63,23 +63,19 @@ impl LocalFeatures {
#[cfg(not(feature = "fuzztarget"))]
pub(crate) fn new() -> LocalFeatures {
LocalFeatures {
-flags: vec![1 << 5],
+flags: vec![2 | 1 << 5],
}
}
#[cfg(feature = "fuzztarget")]
pub fn new() -> LocalFeatures {
LocalFeatures {
-flags: vec![1 << 5],
+flags: vec![2 | 1 << 5],
}
}
pub(crate) fn supports_data_loss_protect(&self) -> bool {
self.flags.len() > 0 && (self.flags[0] & 3) != 0
}
pub(crate) fn requires_data_loss_protect(&self) -> bool {
self.flags.len() > 0 && (self.flags[0] & 1) != 0
}
pub(crate) fn initial_routing_sync(&self) -> bool {
self.flags.len() > 0 && (self.flags[0] & (1 << 3)) != 0
}
@@ -2018,9 +2014,9 @@ mod tests {
target_value.append(&mut hex::decode("0000").unwrap());
}
if initial_routing_sync {
target_value.append(&mut hex::decode("000128").unwrap());
target_value.append(&mut hex::decode("00012a").unwrap());
} else {
target_value.append(&mut hex::decode("000120").unwrap());
target_value.append(&mut hex::decode("000122").unwrap());
}
assert_eq!(encoded_value, target_value);
}
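
The updated test vectors follow directly from the flag-byte arithmetic; a worked check:

fn main() {
    // New LocalFeatures byte: bit 1 (option_data_loss_protect, optional)
    // plus the previously-set bit 5.
    let flags: u8 = 2 | 1 << 5;
    assert_eq!(flags, 0x22); // encoded as "000122": two length bytes (1), then the flag byte
    // With initial_routing_sync (bit 3) also set:
    assert_eq!(flags | 1 << 3, 0x2a); // encoded as "00012a"
    // supports_data_loss_protect() accepts either the required (bit 0) or
    // the optional (bit 1) flag:
    assert_ne!(flags & 3, 0);
}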

src/ln/peer_handler.rs

@@ -623,10 +623,6 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
log_info!(self, "Peer local features required unknown version bits");
return Err(PeerHandleError{ no_connection_possible: true });
}
-if msg.local_features.requires_data_loss_protect() {
-	log_info!(self, "Peer local features required data_loss_protect");
-	return Err(PeerHandleError{ no_connection_possible: true });
-}
if peer.their_global_features.is_some() {
return Err(PeerHandleError{ no_connection_possible: false });
}
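
The removed rejection is obsolete because of BOLT 9's "it's OK to be odd" rule: an even feature bit means the peer requires the feature, which is only fatal if we don't implement it - and after this PR we do, advertising the odd (optional) bit ourselves. A toy standalone copy of the two msgs.rs helpers to illustrate the bit convention:

fn requires_data_loss_protect(flags: &[u8]) -> bool {
    flags.len() > 0 && (flags[0] & 1) != 0 // bit 0: peer requires the feature
}

fn supports_data_loss_protect(flags: &[u8]) -> bool {
    flags.len() > 0 && (flags[0] & 3) != 0 // bit 0 or bit 1
}

fn main() {
    // A peer requiring data_loss_protect used to force a disconnect here;
    // now it's fine, since we support (and advertise) the feature ourselves.
    let peer_flags = [0b0000_0001u8];
    assert!(requires_data_loss_protect(&peer_flags) && supports_data_loss_protect(&peer_flags));
}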