Fix typos

Dimitris Apostolou 2019-01-24 16:41:51 +02:00 committed by rex4539
parent e79b98ddda
commit ab10b05075
13 changed files with 81 additions and 81 deletions

@ -1,4 +1,4 @@
# see https://editorconfig.org for more options, and setup instructions for yours editor
# see https://editorconfig.org for more options, and setup instructions for your editor
[*]
indent_style = tab

@ -2,7 +2,7 @@
//! blockchain.
//!
//! Includes traits for monitoring and receiving notifications of new blocks and block
//! disconnections, transactio broadcasting, and feerate information requests.
//! disconnections, transaction broadcasting, and feerate information requests.
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::transaction::Transaction;
@ -143,7 +143,7 @@ impl ChainWatchedUtil {
}
#[cfg(not(test))]
{
let _tx_unused = txid; // Its used in cfg(test), though
let _tx_unused = txid; // It's used in cfg(test), though
self.watched_txn.insert(script_pub_key.clone())
}
}
@ -155,7 +155,7 @@ impl ChainWatchedUtil {
self.watched_outpoints.insert(outpoint)
}
/// Sets us to match all transactions, returning true if this is a new setting anf false if
/// Sets us to match all transactions, returning true if this is a new setting and false if
/// we'd already been set to match everything.
pub fn watch_all(&mut self) -> bool {
if self.watch_all { return false; }
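
A minimal sketch of the idempotent toggle the doc comment above describes, with the struct trimmed to the one relevant field (illustrative, not the real ChainWatchedUtil):

    struct WatchState {
        watch_all: bool,
    }

    impl WatchState {
        // Returns true only on the transition into match-everything mode.
        fn watch_all(&mut self) -> bool {
            if self.watch_all { return false; }
            self.watch_all = true;
            true
        }
    }
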

@ -4,7 +4,7 @@
//!
//! A full-featured but also flexible lightning implementation, in library form. This allows the
//! user (you) to decide how they wish to use it instead of being a fully self-contained daemon.
//! This means there is no built-in threading/execution environment and its up to the user to
//! This means there is no built-in threading/execution environment and it's up to the user to
//! figure out how best to make networking happen/timers fire/things get written to disk/keys get
//! generated/etc. This makes it a good candidate for tight integration into an existing wallet
//! instead of having a rather-separate lightning appendage to a wallet.

@ -93,14 +93,14 @@ enum OutboundHTLCState {
/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
/// we will promote to Committed (note that they may not accept it until the next time we
/// revoke, but we dont really care about that:
/// revoke, but we don't really care about that:
/// * they've revoked, so worst case we can announce an old state and get our (option on)
/// money back (though we wont), and,
/// money back (though we won't), and,
/// * we'll send them a revoke when they send a commitment_signed, and since only they're
/// allowed to remove it, the "can only be removed once committed on both sides" requirement
/// doesn't matter to us and its up to them to enforce it, worst-case they jump ahead but
/// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
/// we'll never get out of sync).
/// Note that we Box the OnionPacket as its rather large and we don't want to blow up
/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
/// OutboundHTLCOutput's size just for a temporary bit
LocalAnnounced(Box<msgs::OnionPacket>),
Committed,
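
As a rough model of the lifecycle described above, a trimmed-down version of this state machine (the real LocalAnnounced variant boxes an OnionPacket, and the enum has further states for removal):

    enum OutboundHtlcState {
        // Sent in our commitment_signed; the peer may not accept it until
        // they next revoke_and_ack, but per the comment above we don't care.
        LocalAnnounced,
        // Irrevocably included in commitments on both sides.
        Committed,
    }

    // On processing the peer's revoke_and_ack, announced HTLCs are promoted.
    fn promote_on_revoke_and_ack(state: &mut OutboundHtlcState) {
        if matches!(state, OutboundHtlcState::LocalAnnounced) {
            *state = OutboundHtlcState::Committed;
        }
    }
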
@ -292,7 +292,7 @@ pub(super) struct Channel {
last_sent_closing_fee: Option<(u64, u64)>, // (feerate, fee)
/// The hash of the block in which the funding transaction reached our CONF_TARGET. We use this
/// to detect unconfirmation after a serialize-unserialize roudtrip where we may not see a full
/// to detect unconfirmation after a serialize-unserialize roundtrip where we may not see a full
/// series of block_connected/block_disconnected calls. Obviously this is not a guarantee as we
/// could miss the funding_tx_confirmed_in block as well, but it serves as a useful fallback.
funding_tx_confirmed_in: Option<Sha256dHash>,
@ -551,7 +551,7 @@ impl Channel {
return Err(ChannelError::Close("Bogus; channel reserve is less than dust limit"));
}
if msg.htlc_minimum_msat >= (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 {
return Err(ChannelError::Close("Miminum htlc value is full channel value"));
return Err(ChannelError::Close("Minimum htlc value is full channel value"));
}
Channel::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
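
The two sanity checks in this hunk, pulled out into a free function as a sketch (field names mirror the open_channel message, but the signature is illustrative):

    fn check_funding_params(funding_satoshis: u64, channel_reserve_satoshis: u64,
                            dust_limit_satoshis: u64, htlc_minimum_msat: u64)
                            -> Result<(), &'static str> {
        if channel_reserve_satoshis < dust_limit_satoshis {
            return Err("Bogus; channel reserve is less than dust limit");
        }
        // An htlc_minimum at or above the spendable value (note the
        // sats-to-msats conversion) means no HTLC can ever be added.
        if htlc_minimum_msat >= (funding_satoshis - channel_reserve_satoshis) * 1000 {
            return Err("Minimum htlc value is full channel value");
        }
        Ok(())
    }
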
@ -1113,7 +1113,7 @@ impl Channel {
}
/// Signs a transaction created by build_htlc_transaction. If the transaction is an
/// HTLC-Success transaction (ie htlc.offered is false), preimate must be set!
/// HTLC-Success transaction (ie htlc.offered is false), preimage must be set!
fn sign_htlc_transaction(&self, tx: &mut Transaction, their_sig: &Signature, preimage: &Option<PaymentPreimage>, htlc: &HTLCOutputInCommitment, keys: &TxCreationKeys) -> Result<Signature, ChannelError> {
if tx.input.len() != 1 {
panic!("Tried to sign HTLC transaction that had input count != 1!");
@ -1151,7 +1151,7 @@ impl Channel {
/// In such cases we debug_assert!(false) and return an IgnoreError. Thus, will always return
/// Ok(_) if debug assertions are turned on and preconditions are met.
fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitor>), ChannelError> {
// Either ChannelFunded got set (which means it wont bet unset) or there is no way any
// Either ChannelFunded got set (which means it won't be unset) or there is no way any
// caller thought we could have something claimed (cause we wouldn't have accepted in an
// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
// either.
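
The "always return Ok(_) when debug assertions are on" convention above amounts to a pattern like this (a sketch, not the actual helper):

    #[derive(Debug)]
    enum ChannelError { Ignore(&'static str) }

    // Precondition violations abort in test/debug builds but degrade to a
    // recoverable IgnoreError in release builds.
    fn require(ok: bool, msg: &'static str) -> Result<(), ChannelError> {
        if !ok {
            debug_assert!(false, "{}", msg);
            return Err(ChannelError::Ignore(msg));
        }
        Ok(())
    }
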
@ -1274,7 +1274,7 @@ impl Channel {
},
_ => {
debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
return Err(ChannelError::Ignore("Unable to find a pending HTLC which matchd the given HTLC ID"));
return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
}
}
pending_idx = idx;
@ -1352,10 +1352,10 @@ impl Channel {
return Err(ChannelError::Close("They wanted our payments to be delayed by a needlessly long period"));
}
if msg.max_accepted_htlcs < 1 {
return Err(ChannelError::Close("0 max_accpted_htlcs makes for a useless channel"));
return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel"));
}
if msg.max_accepted_htlcs > 483 {
return Err(ChannelError::Close("max_accpted_htlcs > 483"));
return Err(ChannelError::Close("max_accepted_htlcs > 483"));
}
// Now check against optional parameters as set by config...
@ -1433,7 +1433,7 @@ impl Channel {
}
if self.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
// remember the channel, so its safe to just send an error_message here and drop the
// remember the channel, so it's safe to just send an error_message here and drop the
// channel.
return Err(ChannelError::Close("Received funding_created after we got the channel!"));
}
@ -1826,8 +1826,8 @@ impl Channel {
for htlc_update in htlc_updates.drain(..) {
// Note that this *can* fail, though it should be due to rather-rare conditions on
// fee races with adding too many outputs which push our total payments just over
// the limit. In case its less rare than I anticipate, we may want to revisit
// handling this case better and maybe fufilling some of the HTLCs while attempting
// the limit. In case it's less rare than I anticipate, we may want to revisit
// handling this case better and maybe fulfilling some of the HTLCs while attempting
// to rebalance channels.
if err.is_some() { // We're back to AwaitingRemoteRevoke (or are about to fail the channel)
self.holding_cell_htlc_updates.push(htlc_update);
@ -1869,8 +1869,8 @@ impl Channel {
}
}
}
//TODO: Need to examine the type of err - if its a fee issue or similar we may want to
//fail it back the route, if its a temporary issue we can ignore it...
//TODO: Need to examine the type of err - if it's a fee issue or similar we may want to
//fail it back the route, if it's a temporary issue we can ignore it...
match err {
None => {
if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.holding_cell_update_fee.is_none() {
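
A simplified model of the holding-cell drain these two hunks document: after the first failed update, the remaining updates (including the failed one) are re-queued for a retry after the next revoke_and_ack. Purely illustrative; the real code also distinguishes fee errors from temporary ones, per the TODO above.

    fn free_holding_cell<T>(cell: &mut Vec<T>,
                            mut apply: impl FnMut(&T) -> Result<(), ()>) {
        let pending: Vec<T> = cell.drain(..).collect();
        let mut failed = false;
        for update in pending {
            // Once something fails, stop applying and push everything back.
            if failed || apply(&update).is_err() {
                failed = true;
                cell.push(update);
            }
        }
    }
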
@ -2022,7 +2022,7 @@ impl Channel {
}
} else {
if let Some(feerate) = self.pending_update_fee {
// Because a node cannot send two commitment_signed's in a row without getting a
// Because a node cannot send two commitment_signeds in a row without getting a
// revoke_and_ack from us (as it would otherwise not know the per_commitment_point
// it should use to create keys with) and because a node can't send a
// commitment_signed without changes, checking if the feerate is equal to the
@ -2479,7 +2479,7 @@ impl Channel {
assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
// BOLT 2 says we must only send a scriptpubkey of certain standard forms, which are up to
// 34 bytes in length, so dont let the remote peer feed us some super fee-heavy script.
// 34 bytes in length, so don't let the remote peer feed us some super fee-heavy script.
if self.channel_outbound && msg.scriptpubkey.len() > 34 {
return Err(ChannelError::Close("Got shutdown_scriptpubkey of absurd length from remote peer"));
}
@ -3119,7 +3119,7 @@ impl Channel {
/// waiting on the remote peer to send us a revoke_and_ack during which time we cannot add new
/// HTLCs on the wire or we wouldn't be able to determine what they actually ACK'ed.
/// You MUST call send_commitment prior to any other calls on this Channel
/// If an Err is returned, its a ChannelError::Ignore!
/// If an Err is returned, it's a ChannelError::Ignore!
pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelFunded as u32) {
return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down"));
@ -3376,7 +3376,7 @@ impl Channel {
}, dropped_outbound_htlcs))
}
/// Gets the latest commitment transaction and any dependant transactions for relay (forcing
/// Gets the latest commitment transaction and any dependent transactions for relay (forcing
/// shutdown of this channel - no more calls into this Channel may be made afterwards except
/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
/// Also returns the list of payment_hashes for channels which we can safely fail backwards
@ -3930,7 +3930,7 @@ mod tests {
#[test]
fn test_max_funding_satoshis() {
assert!(MAX_FUNDING_SATOSHIS <= 21_000_000 * 100_000_000,
"MAX_FUNDING_SATOSHIS is greater than all satoshis on existence");
"MAX_FUNDING_SATOSHIS is greater than all satoshis in existence");
}
struct Keys {

@ -249,7 +249,7 @@ pub(super) struct ChannelHolder {
pub(super) next_forward: Instant,
/// short channel id -> forward infos. Key of 0 means payments received
/// Note that while this is held in the same mutex as the channels themselves, no consistency
/// guarantees are made about there existing a channel with the short id here, nor the short
/// guarantees are made about the existence of a channel with the short id here, nor the short
/// ids in the PendingForwardHTLCInfo!
pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
/// Note that while this is held in the same mutex as the channels themselves, no consistency
@ -344,7 +344,7 @@ pub struct ChannelManager {
/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
/// ie the node we forwarded the payment on to should always have enough room to reliably time out
/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
/// CLTV_CLAIM_BUFFER point (we static assert that its at least 3 blocks more).
/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
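
One way to realize the "static assert" the comment mentions is a const whose evaluation underflows, and thus fails compilation, when the headroom is violated. CLTV_EXPIRY_DELTA is from this hunk and HTLC_FAIL_TIMEOUT_BLOCKS appears later in this diff; the CLTV_CLAIM_BUFFER value is assumed here for illustration:

    const CLTV_EXPIRY_DELTA: u16 = 6 * 12;
    const CLTV_CLAIM_BUFFER: u32 = 6; // assumed, see channelmonitor
    const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3;

    // Underflow here is a compile-time error, enforcing the ">= 3 blocks
    // more than CLTV_CLAIM_BUFFER" requirement described above.
    const _CHECK_CLTV_SANITY: u32 =
        CLTV_EXPIRY_DELTA as u32 - (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS + 3);
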
@ -744,7 +744,7 @@ impl ChannelManager {
if msg.onion_routing_packet.version != 0 {
//TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
//sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
//the hash doesn't really serve any purpuse - in the case of hashing all data, the
//the hash doesn't really serve any purpose - in the case of hashing all data, the
//receiving node would have to brute force to figure out which version was put in the
//packet by the node that send us the message, in the case of hashing the hop_data, the
//node knows the HMAC matched, so they already know what is there...
@ -1146,7 +1146,7 @@ impl ChannelManager {
/// Processes HTLCs which are pending waiting on random forward delay.
///
/// Should only really ever be called in response to an PendingHTLCsForwardable event.
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
/// Will likely generate further events.
pub fn process_pending_htlc_forwards(&self) {
let _ = self.total_consistency_lock.read().unwrap();
@ -1251,7 +1251,7 @@ impl ChannelManager {
// messages when we can.
// We don't need any kind of timer here as they should fail
// the channel onto the chain if they can't get our
// update_fail_htlc in time, its not our problem.
// update_fail_htlc in time, it's not our problem.
}
}
},
@ -1480,7 +1480,7 @@ impl ChannelManager {
None => {
// TODO: There is probably a channel manager somewhere that needs to
// learn the preimage as the channel already hit the chain and that's
// why its missing.
// why it's missing.
return
}
};
@ -1490,7 +1490,7 @@ impl ChannelManager {
Ok((msgs, monitor_option)) => {
if let Some(chan_monitor) = monitor_option {
if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
unimplemented!();// but def dont push the event...
unimplemented!();// but def don't push the event...
}
}
if let Some((msg, commitment_signed)) = msgs {
@ -1548,7 +1548,7 @@ impl ChannelManager {
// knowledge of those gets moved into the appropriate in-memory
// ChannelMonitor and they get failed backwards once we get
// on-chain confirmations.
// Note I think #198 addresses this, so once its merged a test
// Note I think #198 addresses this, so once it's merged a test
// should be written.
if let Some(short_id) = channel.get_short_channel_id() {
short_to_id.remove(&short_id);
@ -1848,7 +1848,7 @@ impl ChannelManager {
//
//TODO: There exists a further attack where a node may garble the onion data, forward it to
//us repeatedly garbled in different ways, and compare our error messages, which are
//encrypted with the same key. Its not immediately obvious how to usefully exploit that,
//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
//but we should prevent it anyway.
let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
@ -2272,7 +2272,7 @@ impl ChannelManager {
impl events::MessageSendEventsProvider for ChannelManager {
fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
// TODO: Event release to users and serialization is currently race-y: its very easy for a
// TODO: Event release to users and serialization is currently race-y: it's very easy for a
// user to serialize a ChannelManager with pending events in it and lose those events on
// restart. This is doubly true for the fail/fulfill-backs from monitor events!
{
@ -2297,7 +2297,7 @@ impl events::MessageSendEventsProvider for ChannelManager {
impl events::EventsProvider for ChannelManager {
fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
// TODO: Event release to users and serialization is currently race-y: its very easy for a
// TODO: Event release to users and serialization is currently race-y: it's very easy for a
// user to serialize a ChannelManager with pending events in it and lose those events on
// restart. This is doubly true for the fail/fulfill-backs from monitor events!
{
@ -2897,7 +2897,7 @@ pub struct ChannelManagerReadArgs<'a> {
/// value.get_funding_txo() should be the key).
///
/// If a monitor is inconsistent with the channel state during deserialization the channel will
/// be force-closed using the data in the channelmonitor and the Channel will be dropped. This
/// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
/// is true for missing channels as well. If there is a monitor missing for which we find
/// channel data Err(DecodeError::InvalidValue) will be returned.
///

@ -219,7 +219,7 @@ impl<Key : Send + cmp::Eq + hash::Hash + 'static> SimpleManyChannelMonitor<Key>
res
}
/// Adds or udpates the monitor which monitors the channel referred to by the given key.
/// Adds or updates the monitor which monitors the channel referred to by the given key.
pub fn add_update_monitor_by_key(&self, key: Key, monitor: ChannelMonitor) -> Result<(), MonitorUpdateError> {
let mut monitors = self.monitors.lock().unwrap();
match monitors.get_mut(&key) {
@ -299,7 +299,7 @@ pub(crate) const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3;
/// Number of blocks we wait on seeing a confirmed HTLC-Timeout or previous revoked commitment
/// transaction before we fail corresponding inbound HTLCs. This prevents us from failing backwards
/// and then getting a reorg resulting in us losing money.
//TODO: We currently dont actually use this...we should
//TODO: We currently don't actually use this...we should
pub(crate) const HTLC_FAIL_ANTI_REORG_DELAY: u32 = 6;
#[derive(Clone, PartialEq)]
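
A sketch of how the anti-reorg delay above would be applied once the TODO is addressed: only fail an inbound HTLC backwards after the confirming transaction is buried this deep (helper name and shape are hypothetical):

    const HTLC_FAIL_ANTI_REORG_DELAY: u32 = 6;

    // conf_height: block height at which the HTLC-Timeout or revoked
    // commitment transaction confirmed.
    fn safe_to_fail_backwards(conf_height: u32, current_height: u32) -> bool {
        // current_height - conf_height + 1 is the confirmation count.
        current_height + 1 >= conf_height + HTLC_FAIL_ANTI_REORG_DELAY
    }
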
@ -642,7 +642,7 @@ impl ChannelMonitor {
let our_funding_info = funding_info;
if let Storage::Local { ref funding_info, .. } = other.key_storage {
if funding_info.is_none() { return Err(MonitorUpdateError("Try to combine a Local monitor without funding_info")); }
// We should be able to compare the entire funding_txo, but in fuzztarget its trivially
// We should be able to compare the entire funding_txo, but in fuzztarget it's trivially
// easy to collide the funding_txo hash and have a different scriptPubKey.
if funding_info.as_ref().unwrap().0 != our_funding_info.as_ref().unwrap().0 {
return Err(MonitorUpdateError("Funding transaction outputs are not identical!"));
@ -1445,7 +1445,7 @@ impl ChannelMonitor {
(txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs, htlc_updated)
}
/// Attempst to claim a remote HTLC-Success/HTLC-Timeout s outputs using the revocation key
/// Attempts to claim a remote HTLC-Success/HTLC-Timeout's outputs using the revocation key
fn check_spend_remote_htlc(&self, tx: &Transaction, commitment_number: u64) -> (Option<Transaction>, Option<SpendableOutputDescriptor>) {
if tx.input.len() != 1 || tx.output.len() != 1 {
return (None, None)
@ -1615,7 +1615,7 @@ impl ChannelMonitor {
/// Should not be used if check_spend_revoked_transaction succeeds.
fn check_spend_local_transaction(&self, tx: &Transaction, _height: u32) -> (Vec<Transaction>, Vec<SpendableOutputDescriptor>, (Sha256dHash, Vec<TxOut>)) {
let commitment_txid = tx.txid();
// TODO: If we find a match here we need to fail back HTLCs that were't included in the
// TODO: If we find a match here we need to fail back HTLCs that weren't included in the
// broadcast commitment transaction, either because they didn't meet dust or because they
// weren't yet included in our commitment transaction(s).
if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
@ -1669,7 +1669,7 @@ impl ChannelMonitor {
}
Storage::Watchtower { .. } => {
//TODO: we need to ensure an offline client will generate the event when it
// cames back online after only the watchtower saw the transaction
// comes back online after only the watchtower saw the transaction
}
}
}

@ -1084,7 +1084,7 @@ pub fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool,
check_added_monitors!(node_a, 0);
}
// We dont yet support both needing updates, as that would require a different commitment dance:
// We don't yet support both needing updates, as that would require a different commitment dance:
assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
(pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));

@ -1042,7 +1042,7 @@ fn fake_network_test() {
#[test]
fn duplicate_htlc_test() {
// Test that we accept duplicate payment_hash HTLCs across the network and that
// claiming/failing them are all separate and don't effect each other
// claiming/failing them are all separate and don't affect each other
let mut nodes = create_network(6);
// Create some initial channels to route via 3 to 4/5 from 0/1/2
@ -1682,7 +1682,7 @@ fn claim_htlc_outputs_single_tx() {
assert_eq!(node_txn[2], node_txn[9]);
assert_eq!(node_txn[3], node_txn[10]);
assert_eq!(node_txn[4], node_txn[11]);
assert_eq!(node_txn[3], node_txn[5]); //local commitment tx + htlc timeout tx broadcated by ChannelManger
assert_eq!(node_txn[3], node_txn[5]); //local commitment tx + htlc timeout tx broadcasted by ChannelManager
assert_eq!(node_txn[4], node_txn[6]);
assert_eq!(node_txn[0].input.len(), 1);
@ -1721,7 +1721,7 @@ fn claim_htlc_outputs_single_tx() {
#[test]
fn test_htlc_on_chain_success() {
// Test that in case of an unilateral close onchain, we detect the state of output thanks to
// Test that in case of a unilateral close onchain, we detect the state of output thanks to
// ChainWatchInterface and pass the preimage backward accordingly. So here we test that ChannelManager is
// broadcasting the right event to other nodes in payment path.
// We test with two HTLCs simultaneously as that was not handled correctly in the past.
@ -1750,7 +1750,7 @@ fn test_htlc_on_chain_success() {
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
// Broadcast legit commitment tx from C on B's chain
// Broadcast HTLC Success transation by C on received output from C's commitment tx on B's chain
// Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
assert_eq!(commitment_tx.len(), 1);
check_spends!(commitment_tx[0], chan_2.3.clone());
@ -1888,8 +1888,8 @@ fn test_htlc_on_chain_success() {
#[test]
fn test_htlc_on_chain_timeout() {
// Test that in case of an unilateral close onchain, we detect the state of output thanks to
// ChainWatchInterface and timeout the HTLC bacward accordingly. So here we test that ChannelManager is
// Test that in case of a unilateral close onchain, we detect the state of output thanks to
// ChainWatchInterface and timeout the HTLC backward accordingly. So here we test that ChannelManager is
// broadcasting the right event to other nodes in payment path.
// A ------------------> B ----------------------> C (timeout)
// B's commitment tx C's commitment tx
@ -1909,7 +1909,7 @@ fn test_htlc_on_chain_timeout() {
let (_payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
// Brodacast legit commitment tx from C on B's chain
// Broadcast legit commitment tx from C on B's chain
let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
check_spends!(commitment_tx[0], chan_2.3.clone());
nodes[2].node.fail_htlc_backwards(&payment_hash, 0);
@ -1936,7 +1936,7 @@ fn test_htlc_on_chain_timeout() {
check_spends!(node_txn[0], chan_2.3.clone());
assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 71);
// Broadcast timeout transaction by B on received output fron C's commitment tx on B's chain
// Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
// Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
let timeout_tx;
@ -2223,7 +2223,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
let events = nodes[0].node.get_and_clear_pending_msg_events();
// If we delievered B's RAA we got an unknown preimage error, not something
// If we delivered B's RAA we got an unknown preimage error, not something
// that we should update our routing table for.
assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
for event in events {
@ -3037,7 +3037,7 @@ fn test_simple_manager_serialize_deserialize() {
#[test]
fn test_manager_serialize_deserialize_inconsistent_monitor() {
// Test deserializing a ChannelManager with a out-of-date ChannelMonitor
// Test deserializing a ChannelManager with an out-of-date ChannelMonitor
let mut nodes = create_network(4);
create_announced_chan_between_nodes(&nodes, 0, 1);
create_announced_chan_between_nodes(&nodes, 2, 0);
@ -4056,7 +4056,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
// As far as A is concerened, the HTLC is now present only in the latest remote commitment
// As far as A is concerned, the HTLC is now present only in the latest remote commitment
// transaction, however it is not in A's latest local commitment, so we can just broadcast that
// to "time out" the HTLC.
@ -4156,12 +4156,12 @@ fn run_onion_failure_test<F1,F2>(_name: &str, test_case: u8, nodes: &Vec<Node>,
}
// test_case
// 0: node1 fail backward
// 1: final node fail backward
// 2: payment completed but the user reject the payment
// 3: final node fail backward (but tamper onion payloads from node0)
// 100: trigger error in the intermediate node and tamper returnning fail_htlc
// 200: trigger error in the final node and tamper returnning fail_htlc
// 0: node1 fails backward
// 1: final node fails backward
// 2: payment completed but the user rejects the payment
// 3: final node fails backward (but tamper onion payloads from node0)
// 100: trigger error in the intermediate node and tamper returning fail_htlc
// 200: trigger error in the final node and tamper returning fail_htlc
fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case: u8, nodes: &Vec<Node>, route: &Route, payment_hash: &PaymentHash, mut callback_msg: F1, mut callback_fail: F2, mut callback_node: F3, expected_retryable: bool, expected_error_code: Option<u16>, expected_channel_update: Option<HTLCFailChannelUpdate>)
where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC),
F2: for <'a> FnMut(&'a mut msgs::UpdateFailHTLC),
@ -4392,7 +4392,7 @@ fn test_onion_failure() {
// trigger error
msg.amount_msat -= 1;
}, |msg| {
// and tamper returing error message
// and tamper returning error message
let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], NODE|2, &[0;0]);
@ -4400,7 +4400,7 @@ fn test_onion_failure() {
// final node failure
run_onion_failure_test_with_fail_intercept("temporary_node_failure", 200, &nodes, &route, &payment_hash, |_msg| {}, |msg| {
// and tamper returing error message
// and tamper returning error message
let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], NODE|2, &[0;0]);

@ -8,7 +8,7 @@
//!
//! Note that if you go with such an architecture (instead of passing raw socket events to a
//! non-internet-facing system) you trust the frontend internet-facing system to not lie about the
//! source node_id of the mssage, however this does allow you to significantly reduce bandwidth
//! source node_id of the message, however this does allow you to significantly reduce bandwidth
//! between the systems as routing messages can represent a significant chunk of bandwidth usage
//! (especially for non-channel-publicly-announcing nodes). As an alternate design which avoids
//! this issue, if you have sufficient bidirectional bandwidth between your systems, you may send
@ -337,25 +337,25 @@ pub struct AnnouncementSignatures {
/// An address which can be used to connect to a remote peer
#[derive(PartialEq, Clone)]
pub enum NetAddress {
/// An IPv4 address/port on which the peer is listenting.
/// An IPv4 address/port on which the peer is listening.
IPv4 {
/// The 4-byte IPv4 address
addr: [u8; 4],
/// The port on which the node is listenting
/// The port on which the node is listening
port: u16,
},
/// An IPv6 address/port on which the peer is listenting.
/// An IPv6 address/port on which the peer is listening.
IPv6 {
/// The 16-byte IPv6 address
addr: [u8; 16],
/// The port on which the node is listenting
/// The port on which the node is listening
port: u16,
},
/// An old-style Tor onion address/port on which the peer is listening.
OnionV2 {
/// The bytes (usually encoded in base32 with ".onion" appended)
addr: [u8; 10],
/// The port on which the node is listenting
/// The port on which the node is listening
port: u16,
},
/// A new-style Tor onion address/port on which the peer is listening.
@ -368,7 +368,7 @@ pub enum NetAddress {
checksum: u16,
/// The version byte, as defined by the Tor Onion v3 spec.
version: u8,
/// The port on which the node is listenting
/// The port on which the node is listening
port: u16,
},
}

@ -569,8 +569,8 @@ mod tests {
assert_eq!(inbound_peer.process_act_one_with_ephemeral_key(&act_one[..], &our_node_id, our_ephemeral.clone()).unwrap()[..], hex::decode("0002466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap()[..]);
let act_three = hex::decode("00b9e3a702e93e3a9948c2ed6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8fc28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba").unwrap().to_vec();
// test vector doesn't specify the initiator static key, but its the same as the one
// from trasport-initiator successful handshake
// test vector doesn't specify the initiator static key, but it's the same as the one
// from transport-initiator successful handshake
assert_eq!(inbound_peer.process_act_three(&act_three[..]).unwrap().serialize()[..], hex::decode("034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa").unwrap()[..]);
match inbound_peer.noise_state {
@ -695,8 +695,8 @@ mod tests {
assert_eq!(inbound_peer.process_act_one_with_ephemeral_key(&act_one[..], &our_node_id, our_ephemeral.clone()).unwrap()[..], hex::decode("0002466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap()[..]);
let act_three = hex::decode("00b9e3a702e93e3a9948c2ed6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8fc28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba").unwrap().to_vec();
// test vector doesn't specify the initiator static key, but its the same as the one
// from trasport-initiator successful handshake
// test vector doesn't specify the initiator static key, but it's the same as the one
// from transport-initiator successful handshake
assert_eq!(inbound_peer.process_act_three(&act_three[..]).unwrap().serialize()[..], hex::decode("034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa").unwrap()[..]);
match inbound_peer.noise_state {

@ -113,7 +113,7 @@ struct Peer {
}
impl Peer {
/// Returns true if the the channel announcements/updates for the given channel should be
/// Returns true if the channel announcements/updates for the given channel should be
/// forwarded to this peer.
/// If we are sending our routing table to this peer and we have not yet sent channel
/// announcements/updates for the given channel_id then we will send it when we get to that
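
The forwarding rule this comment describes can be sketched as a watermark check during the initial routing-table sync (names are hypothetical):

    struct SyncState {
        sync_in_progress: bool,
        // Highest short_channel_id already sent during the initial sync.
        last_synced_short_channel_id: u64,
    }

    fn should_forward_channel(sync: &SyncState, short_channel_id: u64) -> bool {
        // Outside a sync, forward everything; during one, channels past the
        // watermark will be sent when the sync reaches them anyway.
        !sync.sync_in_progress || short_channel_id <= sync.last_synced_short_channel_id
    }
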
@ -475,7 +475,7 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
match e {
msgs::DecodeError::UnknownVersion => return Err(PeerHandleError{ no_connection_possible: false }),
msgs::DecodeError::UnknownRequiredFeature => {
log_debug!(self, "Got a channel/node announcement with an known required feature flag, you may want to udpate!");
log_debug!(self, "Got a channel/node announcement with an known required feature flag, you may want to update!");
continue;
},
msgs::DecodeError::InvalidValue => {
@ -1128,7 +1128,7 @@ mod tests {
#[test]
fn test_disconnect_peer() {
// Simple test which builds a network of PeerManager, connects and brings them to NoiseState::Finished and
// push an DisconnectPeer event to remove the node flagged by id
// push a DisconnectPeer event to remove the node flagged by id
let mut peers = create_network(2);
establish_connection(&peers[0], &peers[1]);
assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);

@ -512,7 +512,7 @@ impl RoutingMessageHandler for Router {
match network.channels.entry(NetworkMap::get_key(msg.contents.short_channel_id, msg.contents.chain_hash)) {
BtreeEntry::Occupied(mut entry) => {
//TODO: because asking the blockchain if short_channel_id is valid is only optional
//in the blockchain API, we need to handle it smartly here, though its unclear
//in the blockchain API, we need to handle it smartly here, though it's unclear
//exactly how...
if checked_utxo {
// Either our UTXO provider is busted, there was a reorg, or the UTXO provider
@ -779,7 +779,7 @@ impl Router {
}
/// Marks a node as having failed a route. This will avoid re-using the node in routes for now,
/// with an expotnential decay in node "badness". Note that there is deliberately no
/// with an exponential decay in node "badness". Note that there is deliberately no
/// mark_channel_bad as a node may simply lie and suggest that an upstream channel from it is
/// what failed the route and not the node itself. Instead, setting the blamed_upstream_node
/// boolean will reduce the penalty, returning the node to usability faster. If the node is
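
A minimal model of the decaying penalty described above; the weights and field names are invented for illustration:

    struct NodeScore {
        penalty: u32,
    }

    impl NodeScore {
        fn mark_failed(&mut self, blamed_upstream_node: bool) {
            // Smaller penalty when the node merely reported an upstream
            // failure, since it may be lying about which hop failed.
            self.penalty += if blamed_upstream_node { 1 } else { 4 };
        }

        // Called periodically; halving gives the exponential decay.
        fn decay(&mut self) {
            self.penalty /= 2;
        }
    }
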

@ -1,7 +1,7 @@
//! Events are returned from various bits in the library which indicate some action must be taken
//! by the client.
//!
//! Because we don't have a built-in runtime, its up to the client to call events at a time in the
//! Because we don't have a built-in runtime, it's up to the client to call events at a time in the
//! future, as well as generate and broadcast funding transactions handle payment preimages and a
//! few other things.
//!
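
In practice that contract means the user drives a polling loop themselves, along these lines (a sketch: get_and_clear_pending_events comes from the EventsProvider impl shown earlier, while the cadence and handling are the caller's choice):

    use std::{thread, time::Duration};

    fn drive_events<E: lightning::util::events::EventsProvider>(provider: &E) {
        loop {
            for event in provider.get_and_clear_pending_events() {
                // Broadcast funding transactions, claim payments, schedule
                // HTLC forwards, etc., depending on the event variant.
                let _ = event;
            }
            thread::sleep(Duration::from_millis(50));
        }
    }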