Mirror of https://github.com/lightningdevkit/rust-lightning.git (synced 2025-02-25 07:17:40 +01:00)

commit eb933d90f3
Merge pull request #276 from TheBlueMatt/2018-12-async-fail

    Fail HTLCs backwards asynchronously

5 changed files with 458 additions and 437 deletions.
(One file's diff is suppressed because one or more lines are too long.)
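Summary: this change stops failing HTLCs backwards synchronously from inside message and block handling. Channel::get_update_fail_htlc_and_commit() is removed; instead, fail_htlc_backwards_internal() queues an HTLCForwardInfo::FailHTLC entry in the per-channel forward_htlcs map and schedules a PendingHTLCsForwardable event, so that process_pending_htlc_forwards() later batches pending adds and fails into a single commitment update. Supporting changes: HTLCForwardInfo becomes a two-variant enum (with matching manual serialization), process_onion_failure() moves from ChannelManager into onion_utils, and the functional tests gain an expect_pending_htlcs_forwardable! macro.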
@@ -1311,16 +1311,6 @@ impl Channel {
 			}))
 		}
 
-	pub fn get_update_fail_htlc_and_commit(&mut self, htlc_id: u64, err_packet: msgs::OnionErrorPacket) -> Result<Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned, ChannelMonitor)>, ChannelError> {
-		match self.get_update_fail_htlc(htlc_id, err_packet)? {
-			Some(update_fail_htlc) => {
-				let (commitment, monitor_update) = self.send_commitment_no_status_check()?;
-				Ok(Some((update_fail_htlc, commitment, monitor_update)))
-			},
-			None => Ok(None)
-		}
-	}
-
 	// Message handlers:
 
 	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig) -> Result<(), ChannelError> {
@@ -3125,6 +3115,7 @@ impl Channel {
 	/// waiting on the remote peer to send us a revoke_and_ack during which time we cannot add new
 	/// HTLCs on the wire or we wouldn't be able to determine what they actually ACK'ed.
 	/// You MUST call send_commitment prior to any other calls on this Channel
+	/// If an Err is returned, its a ChannelError::Ignore!
 	pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> {
 		if (self.channel_state & (ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelFunded as u32) {
 			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down"));
@@ -3236,6 +3227,12 @@ impl Channel {
 			}
 			if have_updates { break; }
 		}
+		for htlc in self.pending_inbound_htlcs.iter() {
+			if let InboundHTLCState::LocalRemoved(_) = htlc.state {
+				have_updates = true;
+			}
+			if have_updates { break; }
+		}
 		if !have_updates {
 			panic!("Cannot create commitment tx until we have some updates to send");
 		}

@@ -39,7 +39,6 @@ use util::ser::{Readable, ReadableArgs, Writeable, Writer};
 use util::chacha20::ChaCha20;
 use util::logger::Logger;
 use util::errors::APIError;
-use util::errors;
 
 use std::{cmp, mem};
 use std::collections::{HashMap, hash_map, HashSet};
@@ -209,13 +208,16 @@ impl MsgHandleErrInternal {
 /// probably increase this significantly.
 const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;
 
-pub(super) struct HTLCForwardInfo {
-	prev_short_channel_id: u64,
-	prev_htlc_id: u64,
-	#[cfg(test)]
-	pub(super) forward_info: PendingForwardHTLCInfo,
-	#[cfg(not(test))]
-	forward_info: PendingForwardHTLCInfo,
+pub(super) enum HTLCForwardInfo {
+	AddHTLC {
+		prev_short_channel_id: u64,
+		prev_htlc_id: u64,
+		forward_info: PendingForwardHTLCInfo,
+	},
+	FailHTLC {
+		htlc_id: u64,
+		err_packet: msgs::OnionErrorPacket,
+	},
 }
 
 /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
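The old HTLCForwardInfo struct could only describe an HTLC to forward onward. As an enum, AddHTLC keeps those fields while FailHTLC carries just the id of the HTLC to fail back on that channel plus the already-encrypted err_packet, letting fail-backs share the per-short_channel_id forward_htlcs queue with ordinary forwards.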
@@ -1162,13 +1164,23 @@ impl ChannelManager {
 				Some(chan_id) => chan_id.clone(),
 				None => {
 					failed_forwards.reserve(pending_forwards.len());
-					for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
-						let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-							short_channel_id: prev_short_channel_id,
-							htlc_id: prev_htlc_id,
-							incoming_packet_shared_secret: forward_info.incoming_shared_secret,
-						});
-						failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None));
+					for forward_info in pending_forwards.drain(..) {
+						match forward_info {
+							HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+								let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+									short_channel_id: prev_short_channel_id,
+									htlc_id: prev_htlc_id,
+									incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+								});
+								failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None));
+							},
+							HTLCForwardInfo::FailHTLC { .. } => {
+								// Channel went away before we could fail it. This implies
+								// the channel is now on chain and our counterparty is
+								// trying to broadcast the HTLC-Timeout, but that's their
+								// problem, not ours.
+							}
+						}
 					}
 					continue;
 				}
@@ -1176,36 +1188,70 @@ impl ChannelManager {
 				let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();
 
 				let mut add_htlc_msgs = Vec::new();
-				for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
-					let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-						short_channel_id: prev_short_channel_id,
-						htlc_id: prev_htlc_id,
-						incoming_packet_shared_secret: forward_info.incoming_shared_secret,
-					});
-					match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
-						Err(_e) => {
-							let chan_update = self.get_channel_update(forward_chan).unwrap();
-							failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
-							continue;
-						},
-						Ok(update_add) => {
-							match update_add {
-								Some(msg) => { add_htlc_msgs.push(msg); },
-								None => {
-									// Nothing to do here...we're waiting on a remote
-									// revoke_and_ack before we can add anymore HTLCs. The Channel
-									// will automatically handle building the update_add_htlc and
-									// commitment_signed messages when we can.
-									// TODO: Do some kind of timer to set the channel as !is_live()
-									// as we don't really want others relying on us relaying through
-									// this channel currently :/.
-								}
-							}
-						}
-					}
-				}
+				let mut fail_htlc_msgs = Vec::new();
+				for forward_info in pending_forwards.drain(..) {
+					match forward_info {
+						HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+							log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
+							let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+								short_channel_id: prev_short_channel_id,
+								htlc_id: prev_htlc_id,
+								incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+							});
+							match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
+								Err(e) => {
+									if let ChannelError::Ignore(_) = e {} else {
+										panic!("Stated return value requirements in send_htlc() were not met");
+									}
+									let chan_update = self.get_channel_update(forward_chan).unwrap();
+									failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
+									continue;
+								},
+								Ok(update_add) => {
+									match update_add {
+										Some(msg) => { add_htlc_msgs.push(msg); },
+										None => {
+											// Nothing to do here...we're waiting on a remote
+											// revoke_and_ack before we can add anymore HTLCs. The Channel
+											// will automatically handle building the update_add_htlc and
+											// commitment_signed messages when we can.
+											// TODO: Do some kind of timer to set the channel as !is_live()
+											// as we don't really want others relying on us relaying through
+											// this channel currently :/.
+										}
+									}
+								}
+							}
+						},
+						HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+							log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
+							match forward_chan.get_update_fail_htlc(htlc_id, err_packet) {
+								Err(e) => {
+									if let ChannelError::Ignore(_) = e {} else {
+										panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+									}
+									// fail-backs are best-effort, we probably already have one
+									// pending, and if not that's OK, if not, the channel is on
+									// the chain and sending the HTLC-Timeout is their problem.
+									continue;
+								},
+								Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
+								Ok(None) => {
+									// Nothing to do here...we're waiting on a remote
+									// revoke_and_ack before we can update the commitment
+									// transaction. The Channel will automatically handle
+									// building the update_fail_htlc and commitment_signed
+									// messages when we can.
+									// We don't need any kind of timer here as they should fail
+									// the channel onto the chain if they can't get our
+									// update_fail_htlc in time, its not our problem.
+								}
+							}
+						},
+					}
+				}
 
-				if !add_htlc_msgs.is_empty() {
+				if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
 					let (commitment_msg, monitor) = match forward_chan.send_commitment() {
 						Ok(res) => res,
 						Err(e) => {
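The forwarding loop above now drains both variants for a channel, accumulating add_htlc_msgs and fail_htlc_msgs before building one commitment; note the widened condition on send_commitment(), so a batch containing only fail-backs still yields a commitment update.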
@@ -1224,7 +1270,7 @@ impl ChannelManager {
 						updates: msgs::CommitmentUpdate {
 							update_add_htlcs: add_htlc_msgs,
 							update_fulfill_htlcs: Vec::new(),
-							update_fail_htlcs: Vec::new(),
+							update_fail_htlcs: fail_htlc_msgs,
 							update_fail_malformed_htlcs: Vec::new(),
 							update_fee: None,
 							commitment_signed: commitment_msg,
@@ -1232,20 +1278,27 @@ impl ChannelManager {
 					});
 				}
 			} else {
-				for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
-					let prev_hop_data = HTLCPreviousHopData {
-						short_channel_id: prev_short_channel_id,
-						htlc_id: prev_htlc_id,
-						incoming_packet_shared_secret: forward_info.incoming_shared_secret,
-					};
-					match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
-						hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
-						hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
-					};
-					new_events.push(events::Event::PaymentReceived {
-						payment_hash: forward_info.payment_hash,
-						amt: forward_info.amt_to_forward,
-					});
+				for forward_info in pending_forwards.drain(..) {
+					match forward_info {
+						HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+							let prev_hop_data = HTLCPreviousHopData {
+								short_channel_id: prev_short_channel_id,
+								htlc_id: prev_htlc_id,
+								incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+							};
+							match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
+								hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
+								hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
+							};
+							new_events.push(events::Event::PaymentReceived {
+								payment_hash: forward_info.payment_hash,
+								amt: forward_info.amt_to_forward,
+							});
+						},
+						HTLCForwardInfo::FailHTLC { .. } => {
+							panic!("Got pending fail of our own HTLC");
+						}
+					}
 				}
 			}
 		}
@@ -1290,6 +1343,10 @@ impl ChannelManager {
 	/// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
 	/// still-available channels.
 	fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+		//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
+		//identify whether we sent it or not based on the (I presume) very different runtime
+		//between the branches here. We should make this async and move it into the forward HTLCs
+		//timer handling.
 		match source {
 			HTLCSource::OutboundRoute { ref route, .. } => {
 				log_trace!(self, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
@@ -1297,9 +1354,9 @@ impl ChannelManager {
 				match &onion_error {
 					&HTLCFailReason::ErrorPacket { ref err } => {
 						#[cfg(test)]
-						let (channel_update, payment_retryable, onion_error_code) = self.process_onion_failure(&source, err.data.clone());
+						let (channel_update, payment_retryable, onion_error_code) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 						#[cfg(not(test))]
-						let (channel_update, payment_retryable, _) = self.process_onion_failure(&source, err.data.clone());
+						let (channel_update, payment_retryable, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 						// TODO: If we decided to blame ourselves (or one of our channels) in
 						// process_onion_failure we should close that channel as it implies our
 						// next-hop is needlessly blaming us!
@@ -1354,36 +1411,25 @@ impl ChannelManager {
 					}
 				};
 
-				let channel_state = channel_state_lock.borrow_parts();
-
-				let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
-					Some(chan_id) => chan_id.clone(),
-					None => return
-				};
-
-				let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
-				match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) {
-					Ok(Some((msg, commitment_msg, chan_monitor))) => {
-						if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-							unimplemented!();
-						}
-						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-							node_id: chan.get_their_node_id(),
-							updates: msgs::CommitmentUpdate {
-								update_add_htlcs: Vec::new(),
-								update_fulfill_htlcs: Vec::new(),
-								update_fail_htlcs: vec![msg],
-								update_fail_malformed_htlcs: Vec::new(),
-								update_fee: None,
-								commitment_signed: commitment_msg,
-							},
-						});
-					},
-					Ok(None) => {},
-					Err(_e) => {
-						//TODO: Do something with e?
-						return;
+				let mut forward_event = None;
+				if channel_state_lock.forward_htlcs.is_empty() {
+					forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
+					channel_state_lock.next_forward = forward_event.unwrap();
+				}
+				match channel_state_lock.forward_htlcs.entry(short_channel_id) {
+					hash_map::Entry::Occupied(mut entry) => {
+						entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
+					},
+					hash_map::Entry::Vacant(entry) => {
+						entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
+					}
+				}
+				mem::drop(channel_state_lock);
+				if let Some(time) = forward_event {
+					let mut pending_events = self.pending_events.lock().unwrap();
+					pending_events.push(events::Event::PendingHTLCsForwardable {
+						time_forwardable: time
+					});
+				}
 			},
 		}
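The randomized hold timer above is what batches fail-backs in with ordinary forwards. A minimal sketch of the arithmetic (hypothetical helper, not part of the diff), assuming rng::rand_f32() returns a uniform f32 in [0.0, 1.0):

    // delay_ms = (r * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS, r in [0.0, 1.0)
    fn forward_delay_ms(r: f32) -> u64 {
        const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;
        ((r * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64
    }

forward_delay_ms(0.0) is 50 and forward_delay_ms(0.5) is 150, with the delay approaching 250 as r approaches 1.0, so a queued fail-back waits roughly 50-250ms rather than being relayed immediately. The timer is only armed when forward_htlcs was empty; otherwise an already-scheduled batch picks the entry up.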
@ -1865,155 +1911,6 @@ impl ChannelManager {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
// Process failure we got back from upstream on a payment we sent. Returns update and a boolean
|
||||
// indicating that the payment itself failed
|
||||
fn process_onion_failure(&self, htlc_source: &HTLCSource, mut packet_decrypted: Vec<u8>) -> (Option<msgs::HTLCFailChannelUpdate>, bool, Option<u16>) {
|
||||
if let &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } = htlc_source {
|
||||
|
||||
let mut res = None;
|
||||
let mut htlc_msat = *first_hop_htlc_msat;
|
||||
let mut error_code_ret = None;
|
||||
let mut next_route_hop_ix = 0;
|
||||
let mut is_from_final_node = false;
|
||||
|
||||
// Handle packed channel/node updates for passing back for the route handler
|
||||
onion_utils::construct_onion_keys_callback(&self.secp_ctx, route, session_priv, |shared_secret, _, _, route_hop| {
|
||||
next_route_hop_ix += 1;
|
||||
if res.is_some() { return; }
|
||||
|
||||
let amt_to_forward = htlc_msat - route_hop.fee_msat;
|
||||
htlc_msat = amt_to_forward;
|
||||
|
||||
let ammag = onion_utils::gen_ammag_from_shared_secret(&shared_secret[..]);
|
||||
|
||||
let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
|
||||
decryption_tmp.resize(packet_decrypted.len(), 0);
|
||||
let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
|
||||
chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
|
||||
packet_decrypted = decryption_tmp;
|
||||
|
||||
is_from_final_node = route.hops.last().unwrap().pubkey == route_hop.pubkey;
|
||||
|
||||
if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
|
||||
let um = onion_utils::gen_um_from_shared_secret(&shared_secret[..]);
|
||||
let mut hmac = HmacEngine::<Sha256>::new(&um);
|
||||
hmac.input(&err_packet.encode()[32..]);
|
||||
|
||||
if fixed_time_eq(&Hmac::from_engine(hmac).into_inner(), &err_packet.hmac) {
|
||||
if let Some(error_code_slice) = err_packet.failuremsg.get(0..2) {
|
||||
const PERM: u16 = 0x4000;
|
||||
const NODE: u16 = 0x2000;
|
||||
const UPDATE: u16 = 0x1000;
|
||||
|
||||
let error_code = byte_utils::slice_to_be16(&error_code_slice);
|
||||
error_code_ret = Some(error_code);
|
||||
|
||||
let (debug_field, debug_field_size) = errors::get_onion_debug_field(error_code);
|
||||
|
||||
// indicate that payment parameter has failed and no need to
|
||||
// update Route object
|
||||
let payment_failed = (match error_code & 0xff {
|
||||
15|16|17|18|19 => true,
|
||||
_ => false,
|
||||
} && is_from_final_node) // PERM bit observed below even this error is from the intermediate nodes
|
||||
|| error_code == 21; // Special case error 21 as the Route object is bogus, TODO: Maybe fail the node if the CLTV was reasonable?
|
||||
|
||||
let mut fail_channel_update = None;
|
||||
|
||||
if error_code & NODE == NODE {
|
||||
fail_channel_update = Some(msgs::HTLCFailChannelUpdate::NodeFailure { node_id: route_hop.pubkey, is_permanent: error_code & PERM == PERM });
|
||||
}
|
||||
else if error_code & PERM == PERM {
|
||||
fail_channel_update = if payment_failed {None} else {Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
|
||||
short_channel_id: route.hops[next_route_hop_ix - if next_route_hop_ix == route.hops.len() { 1 } else { 0 }].short_channel_id,
|
||||
is_permanent: true,
|
||||
})};
|
||||
}
|
||||
else if error_code & UPDATE == UPDATE {
|
||||
if let Some(update_len_slice) = err_packet.failuremsg.get(debug_field_size+2..debug_field_size+4) {
|
||||
let update_len = byte_utils::slice_to_be16(&update_len_slice) as usize;
|
||||
if let Some(update_slice) = err_packet.failuremsg.get(debug_field_size + 4..debug_field_size + 4 + update_len) {
|
||||
if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&update_slice)) {
|
||||
// if channel_update should NOT have caused the failure:
|
||||
// MAY treat the channel_update as invalid.
|
||||
let is_chan_update_invalid = match error_code & 0xff {
|
||||
7 => false,
|
||||
11 => amt_to_forward > chan_update.contents.htlc_minimum_msat,
|
||||
12 => {
|
||||
let new_fee = amt_to_forward.checked_mul(chan_update.contents.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan_update.contents.fee_base_msat as u64) });
|
||||
new_fee.is_some() && route_hop.fee_msat >= new_fee.unwrap()
|
||||
}
|
||||
13 => route_hop.cltv_expiry_delta as u16 >= chan_update.contents.cltv_expiry_delta,
|
||||
14 => false, // expiry_too_soon; always valid?
|
||||
20 => chan_update.contents.flags & 2 == 0,
|
||||
_ => false, // unknown error code; take channel_update as valid
|
||||
};
|
||||
fail_channel_update = if is_chan_update_invalid {
|
||||
// This probably indicates the node which forwarded
|
||||
// to the node in question corrupted something.
|
||||
Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
|
||||
short_channel_id: route_hop.short_channel_id,
|
||||
is_permanent: true,
|
||||
})
|
||||
} else {
|
||||
Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage {
|
||||
msg: chan_update,
|
||||
})
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
if fail_channel_update.is_none() {
|
||||
// They provided an UPDATE which was obviously bogus, not worth
|
||||
// trying to relay through them anymore.
|
||||
fail_channel_update = Some(msgs::HTLCFailChannelUpdate::NodeFailure {
|
||||
node_id: route_hop.pubkey,
|
||||
is_permanent: true,
|
||||
});
|
||||
}
|
||||
} else if !payment_failed {
|
||||
// We can't understand their error messages and they failed to
|
||||
// forward...they probably can't understand our forwards so its
|
||||
// really not worth trying any further.
|
||||
fail_channel_update = Some(msgs::HTLCFailChannelUpdate::NodeFailure {
|
||||
node_id: route_hop.pubkey,
|
||||
is_permanent: true,
|
||||
});
|
||||
}
|
||||
|
||||
// TODO: Here (and a few other places) we assume that BADONION errors
|
||||
// are always "sourced" from the node previous to the one which failed
|
||||
// to decode the onion.
|
||||
res = Some((fail_channel_update, !(error_code & PERM == PERM && is_from_final_node)));
|
||||
|
||||
let (description, title) = errors::get_onion_error_description(error_code);
|
||||
if debug_field_size > 0 && err_packet.failuremsg.len() >= 4 + debug_field_size {
|
||||
log_warn!(self, "Onion Error[{}({:#x}) {}({})] {}", title, error_code, debug_field, log_bytes!(&err_packet.failuremsg[4..4+debug_field_size]), description);
|
||||
}
|
||||
else {
|
||||
log_warn!(self, "Onion Error[{}({:#x})] {}", title, error_code, description);
|
||||
}
|
||||
} else {
|
||||
// Useless packet that we can't use but it passed HMAC, so it
|
||||
// definitely came from the peer in question
|
||||
res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
|
||||
node_id: route_hop.pubkey,
|
||||
is_permanent: true,
|
||||
}), !is_from_final_node));
|
||||
}
|
||||
}
|
||||
}
|
||||
}).expect("Route that we sent via spontaneously grew invalid keys in the middle of it?");
|
||||
if let Some((channel_update, payment_retryable)) = res {
|
||||
(channel_update, payment_retryable, error_code_ret)
|
||||
} else {
|
||||
// only not set either packet unparseable or hmac does not match with any
|
||||
// payment not retryable only when garbage is from the final node
|
||||
(None, !is_from_final_node, None)
|
||||
}
|
||||
} else { unreachable!(); }
|
||||
}
|
||||
|
||||
fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
|
||||
let mut channel_lock = self.channel_state.lock().unwrap();
|
||||
let channel_state = channel_lock.borrow_parts();
|
||||
|
@@ -2106,10 +2003,10 @@ impl ChannelManager {
 		for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
 			match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
 				hash_map::Entry::Occupied(mut entry) => {
-					entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info });
+					entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info });
 				},
 				hash_map::Entry::Vacant(entry) => {
-					entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info }));
+					entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info }));
 				}
 			}
 		}
@@ -2864,11 +2761,41 @@ impl<R: ::std::io::Read> Readable<R> for HTLCFailReason {
 	}
 }
 
-impl_writeable!(HTLCForwardInfo, 0, {
-	prev_short_channel_id,
-	prev_htlc_id,
-	forward_info
-});
+impl Writeable for HTLCForwardInfo {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+		match self {
+			&HTLCForwardInfo::AddHTLC { ref prev_short_channel_id, ref prev_htlc_id, ref forward_info } => {
+				0u8.write(writer)?;
+				prev_short_channel_id.write(writer)?;
+				prev_htlc_id.write(writer)?;
+				forward_info.write(writer)?;
+			},
+			&HTLCForwardInfo::FailHTLC { ref htlc_id, ref err_packet } => {
+				1u8.write(writer)?;
+				htlc_id.write(writer)?;
+				err_packet.write(writer)?;
+			},
+		}
+		Ok(())
+	}
+}
+
+impl<R: ::std::io::Read> Readable<R> for HTLCForwardInfo {
+	fn read(reader: &mut R) -> Result<HTLCForwardInfo, DecodeError> {
+		match <u8 as Readable<R>>::read(reader)? {
+			0 => Ok(HTLCForwardInfo::AddHTLC {
+				prev_short_channel_id: Readable::read(reader)?,
+				prev_htlc_id: Readable::read(reader)?,
+				forward_info: Readable::read(reader)?,
+			}),
+			1 => Ok(HTLCForwardInfo::FailHTLC {
+				htlc_id: Readable::read(reader)?,
+				err_packet: Readable::read(reader)?,
+			}),
+			_ => Err(DecodeError::InvalidValue),
+		}
+	}
+}
 
 impl Writeable for ChannelManager {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {

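Since the struct-oriented impl_writeable! macro cannot express variants, the enum gets manual Writeable/Readable implementations: each variant is written behind a one-byte tag (0 for AddHTLC, 1 for FailHTLC) followed by its fields, and an unknown tag fails with DecodeError::InvalidValue. Queued fail-backs therefore survive a ChannelManager write/read round-trip just like queued forwards.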
@@ -8,7 +8,7 @@ use chain::chaininterface::{ChainListener, ChainWatchInterface};
 use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor};
 use chain::keysinterface;
 use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
-use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,RAACommitmentOrder, PaymentPreimage, PaymentHash};
+use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash};
 use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, ManyChannelMonitor};
 use ln::channel::{ACCEPTED_HTLC_SCRIPT_WEIGHT, OFFERED_HTLC_SCRIPT_WEIGHT};
 use ln::onion_utils;
@@ -490,16 +490,7 @@ macro_rules! commitment_signed_dance {
 		{
 			let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
 			$node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
-			{
-				let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap();
-				if $fail_backwards {
-					assert_eq!(added_monitors.len(), 2);
-					assert!(added_monitors[0].0 != added_monitors[1].0);
-				} else {
-					assert_eq!(added_monitors.len(), 1);
-				}
-				added_monitors.clear();
-			}
+			check_added_monitors!($node_a, 1);
 			extra_msg_option
 		}
 	};
@@ -512,6 +503,9 @@ macro_rules! commitment_signed_dance {
 		{
 			commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
 			if $fail_backwards {
+				expect_pending_htlcs_forwardable!($node_a);
+				check_added_monitors!($node_a, 1);
+
 				let channel_state = $node_a.node.channel_state.lock().unwrap();
 				assert_eq!(channel_state.pending_msg_events.len(), 1);
 				if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
@@ -535,6 +529,20 @@ macro_rules! get_payment_preimage_hash {
 	}
 }
 
+macro_rules! expect_pending_htlcs_forwardable {
+	($node: expr) => {{
+		let events = $node.node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PendingHTLCsForwardable { .. } => { },
+			_ => panic!("Unexpected event"),
+		};
+		let node_ref: &Node = &$node;
+		node_ref.node.channel_state.lock().unwrap().next_forward = Instant::now();
+		$node.node.process_pending_htlc_forwards();
+	}}
+}
+
 fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
 	let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node);
 
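expect_pending_htlcs_forwardable! is defined up here so the helpers below can use it: it asserts exactly one PendingHTLCsForwardable event, resets next_forward so the holding-cell timer gate passes, and runs process_pending_htlc_forwards(). Most of the test churn in this file is call sites replacing that inlined four-step pattern, plus new invocations wherever fail_htlc_backwards() now queues work instead of sending update_fail_htlc immediately.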
@@ -555,15 +563,7 @@ fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node],
 		check_added_monitors!(node, 0);
 		commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
 
-		let events_1 = node.node.get_and_clear_pending_events();
-		assert_eq!(events_1.len(), 1);
-		match events_1[0] {
-			Event::PendingHTLCsForwardable { .. } => { },
-			_ => panic!("Unexpected event"),
-		};
-
-		node.node.channel_state.lock().unwrap().next_forward = Instant::now();
-		node.node.process_pending_htlc_forwards();
+		expect_pending_htlcs_forwardable!(node);
 
 		if idx == expected_route.len() - 1 {
 			let events_2 = node.node.get_and_clear_pending_events();
@@ -713,6 +713,7 @@ fn send_payment(origin: &Node, expected_route: &[&Node], recv_value: u64) {
 
 fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: PaymentHash) {
 	assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash, 0));
+	expect_pending_htlcs_forwardable!(expected_route.last().unwrap());
 	check_added_monitors!(expected_route.last().unwrap(), 1);
 
 	let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
@@ -721,6 +722,9 @@ fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_l
 		{
 			$node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
 			commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, !$last_node);
+			if skip_last && $last_node {
+				expect_pending_htlcs_forwardable!($node);
+			}
 		}
 	}
 }
@@ -1244,14 +1248,7 @@ fn test_update_fee_with_fundee_update_add_htlc() {
 	check_added_monitors!(nodes[0], 1);
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-	let events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 1);
-	match events[0] {
-		Event::PendingHTLCsForwardable { .. } => { },
-		_ => panic!("Unexpected event"),
-	};
-	nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
-	nodes[0].node.process_pending_htlc_forwards();
+	expect_pending_htlcs_forwardable!(nodes[0]);
 
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
@@ -1962,19 +1959,6 @@ fn get_announce_close_broadcast_events(nodes: &Vec<Node>, a: usize, b: usize) {
 	}
 }
 
-macro_rules! expect_pending_htlcs_forwardable {
-	($node: expr) => {{
-		let events = $node.node.get_and_clear_pending_events();
-		assert_eq!(events.len(), 1);
-		match events[0] {
-			Event::PendingHTLCsForwardable { .. } => { },
-			_ => panic!("Unexpected event"),
-		};
-		$node.node.channel_state.lock().unwrap().next_forward = Instant::now();
-		$node.node.process_pending_htlc_forwards();
-	}}
-}
-
 fn do_channel_reserve_test(test_recv: bool) {
 	use util::rng;
 	use std::sync::atomic::Ordering;
@@ -2840,11 +2824,10 @@ fn test_htlc_on_chain_timeout() {
 	let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
 	check_spends!(commitment_tx[0], chan_2.3.clone());
 	nodes[2].node.fail_htlc_backwards(&payment_hash, 0);
-	{
-		let mut added_monitors = nodes[2].chan_monitor.added_monitors.lock().unwrap();
-		assert_eq!(added_monitors.len(), 1);
-		added_monitors.clear();
-	}
+	check_added_monitors!(nodes[2], 0);
+	expect_pending_htlcs_forwardable!(nodes[2]);
+	check_added_monitors!(nodes[2], 1);
 
 	let events = nodes[2].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	match events[0] {
@@ -2894,14 +2877,20 @@ fn test_htlc_on_chain_timeout() {
 	}
 
 	nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![timeout_tx]}, 1);
+	check_added_monitors!(nodes[1], 0);
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
-	check_added_monitors!(nodes[1], 1);
-	assert_eq!(events.len(), 2);
+	assert_eq!(events.len(), 1);
 	match events[0] {
 		MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
 		_ => panic!("Unexpected event"),
 	}
-	match events[1] {
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	check_added_monitors!(nodes[1], 1);
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
 		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
 			assert!(update_add_htlcs.is_empty());
 			assert!(!update_fail_htlcs.is_empty());
@@ -2957,14 +2946,19 @@ fn test_simple_commitment_revoked_fail_backward() {
 
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 	nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+	check_added_monitors!(nodes[1], 0);
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
-	check_added_monitors!(nodes[1], 1);
-	assert_eq!(events.len(), 2);
+	assert_eq!(events.len(), 1);
 	match events[0] {
 		MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
 		_ => panic!("Unexpected event"),
 	}
-	match events[1] {
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	check_added_monitors!(nodes[1], 1);
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
 		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
 			assert!(update_add_htlcs.is_empty());
 			assert_eq!(update_fail_htlcs.len(), 1);
@@ -3025,6 +3019,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool) {
 	let (_, third_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 
 	assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash, 0));
+	expect_pending_htlcs_forwardable!(nodes[2]);
 	check_added_monitors!(nodes[2], 1);
 	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 	assert!(updates.update_add_htlcs.is_empty());
@@ -3037,6 +3032,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool) {
 	// Drop the last RAA from 3 -> 2
 
 	assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash, 0));
+	expect_pending_htlcs_forwardable!(nodes[2]);
 	check_added_monitors!(nodes[2], 1);
 	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 	assert!(updates.update_add_htlcs.is_empty());
@@ -3053,6 +3049,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool) {
 	check_added_monitors!(nodes[2], 1);
 
 	assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash, 0));
+	expect_pending_htlcs_forwardable!(nodes[2]);
 	check_added_monitors!(nodes[2], 1);
 	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 	assert!(updates.update_add_htlcs.is_empty());
@@ -3083,7 +3080,15 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool) {
 		nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa).unwrap();
-		// One monitor for the new revocation preimage, one as we generate a commitment for
-		// nodes[0] to fail first_payment_hash backwards.
-		check_added_monitors!(nodes[1], 2);
+		check_added_monitors!(nodes[1], 1);
+		let events = nodes[1].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PendingHTLCsForwardable { .. } => { },
+			_ => panic!("Unexpected event"),
+		};
+		// Deliberately don't process the pending fail-back so they all fail back at once after
+		// block connection just like the !deliver_bs_raa case
 	}
 
 	let mut failed_htlcs = HashSet::new();
@@ -3093,22 +3098,26 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool) {
 	nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
 
 	let events = nodes[1].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 1);
+	assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
 	match events[0] {
 		Event::PaymentFailed { ref payment_hash, .. } => {
 			assert_eq!(*payment_hash, fourth_payment_hash);
 		},
 		_ => panic!("Unexpected event"),
 	}
 
 	if !deliver_bs_raa {
-		// If we delivered the RAA already then we already failed first_payment_hash backwards.
-		check_added_monitors!(nodes[1], 1);
+		match events[1] {
+			Event::PendingHTLCsForwardable { .. } => { },
+			_ => panic!("Unexpected event"),
+		};
 	}
 	nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
 	nodes[1].node.process_pending_htlc_forwards();
+	check_added_monitors!(nodes[1], 1);
 
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), if deliver_bs_raa { 3 } else { 2 });
-	match events[if deliver_bs_raa { 2 } else { 0 }] {
+	match events[if deliver_bs_raa { 1 } else { 0 }] {
 		MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
 		_ => panic!("Unexpected event"),
 	}
@@ -3124,80 +3133,50 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool) {
 		_ => panic!("Unexpected event"),
 	}
 
-	// Due to the way backwards-failing occurs we do the updates in two steps.
-	let updates = match events[1] {
+	match events[if deliver_bs_raa { 2 } else { 1 }] {
 		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
 			assert!(update_add_htlcs.is_empty());
-			assert_eq!(update_fail_htlcs.len(), 1);
+			assert_eq!(update_fail_htlcs.len(), 3);
 			assert!(update_fulfill_htlcs.is_empty());
 			assert!(update_fail_malformed_htlcs.is_empty());
 			assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
 
 			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap();
-			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap();
-			check_added_monitors!(nodes[0], 1);
-			let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
-			nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
-			check_added_monitors!(nodes[1], 1);
-			let bs_second_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-			nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed).unwrap();
-			check_added_monitors!(nodes[1], 1);
-			let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
-			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
-			check_added_monitors!(nodes[0], 1);
-
-			if !deliver_bs_raa {
-				// If we delievered B's RAA we got an unknown preimage error, not something
-				// that we should update our routing table for.
-				let events = nodes[0].node.get_and_clear_pending_msg_events();
-				assert_eq!(events.len(), 1);
-				match events[0] {
+			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]).unwrap();
+			nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]).unwrap();
+
+			commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
+
+			let events = nodes[0].node.get_and_clear_pending_msg_events();
+			// If we delievered B's RAA we got an unknown preimage error, not something
+			// that we should update our routing table for.
+			assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
+			for event in events {
+				match event {
 					MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
 					_ => panic!("Unexpected event"),
 				}
 			}
 			let events = nodes[0].node.get_and_clear_pending_events();
-			assert_eq!(events.len(), 1);
+			assert_eq!(events.len(), 3);
 			match events[0] {
 				Event::PaymentFailed { ref payment_hash, .. } => {
 					assert!(failed_htlcs.insert(payment_hash.0));
 				},
 				_ => panic!("Unexpected event"),
 			}
-
-			bs_second_update
-		},
-		_ => panic!("Unexpected event"),
-	};
-
-	assert!(updates.update_add_htlcs.is_empty());
-	assert_eq!(updates.update_fail_htlcs.len(), 2);
-	assert!(updates.update_fulfill_htlcs.is_empty());
-	assert!(updates.update_fail_malformed_htlcs.is_empty());
-	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
-	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[1]).unwrap();
-	commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
-
-	let events = nodes[0].node.get_and_clear_pending_msg_events();
-	assert_eq!(events.len(), 2);
-	for event in events {
-		match event {
-			MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
-			_ => panic!("Unexpected event"),
-		}
-	}
-
-	let events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 2);
-	match events[0] {
-		Event::PaymentFailed { ref payment_hash, .. } => {
-			assert!(failed_htlcs.insert(payment_hash.0));
-		},
-		_ => panic!("Unexpected event"),
-	}
-	match events[1] {
-		Event::PaymentFailed { ref payment_hash, .. } => {
-			assert!(failed_htlcs.insert(payment_hash.0));
+			match events[1] {
+				Event::PaymentFailed { ref payment_hash, .. } => {
+					assert!(failed_htlcs.insert(payment_hash.0));
+				},
+				_ => panic!("Unexpected event"),
+			}
+			match events[2] {
+				Event::PaymentFailed { ref payment_hash, .. } => {
+					assert!(failed_htlcs.insert(payment_hash.0));
+				},
+				_ => panic!("Unexpected event"),
+			}
+		},
+		_ => panic!("Unexpected event"),
+	}
@@ -3278,15 +3257,7 @@ fn test_force_close_fail_back() {
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 
-	let events_1 = nodes[1].node.get_and_clear_pending_events();
-	assert_eq!(events_1.len(), 1);
-	match events_1[0] {
-		Event::PendingHTLCsForwardable { .. } => { },
-		_ => panic!("Unexpected event"),
-	};
-
-	nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
-	nodes[1].node.process_pending_htlc_forwards();
+	expect_pending_htlcs_forwardable!(nodes[1]);
 
 	let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_2.len(), 1);
@@ -4063,15 +4034,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[1], 1);
 
-	let events_4 = nodes[1].node.get_and_clear_pending_events();
-	assert_eq!(events_4.len(), 1);
-	match events_4[0] {
-		Event::PendingHTLCsForwardable { .. } => { },
-		_ => panic!("Unexpected event"),
-	};
-
-	nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
-	nodes[1].node.process_pending_htlc_forwards();
+	expect_pending_htlcs_forwardable!(nodes[1]);
 
 	let events_5 = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events_5.len(), 1);
@@ -4612,16 +4575,9 @@ fn test_monitor_update_fail_cs() {
 	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa).unwrap();
 	check_added_monitors!(nodes[1], 1);
 
-	let mut events = nodes[1].node.get_and_clear_pending_events();
-	assert_eq!(events.len(), 1);
-	match events[0] {
-		Event::PendingHTLCsForwardable { .. } => { },
-		_ => panic!("Unexpected event"),
-	};
-	nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
-	nodes[1].node.process_pending_htlc_forwards();
+	expect_pending_htlcs_forwardable!(nodes[1]);
 
-	events = nodes[1].node.get_and_clear_pending_events();
+	let events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	match events[0] {
 		Event::PaymentReceived { payment_hash, amt } => {
@@ -4648,6 +4604,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
 	// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
 	assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, 0));
+	expect_pending_htlcs_forwardable!(nodes[2]);
 	check_added_monitors!(nodes[2], 1);
 
 	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -4672,15 +4629,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
 	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
 
-	let events_1 = nodes[1].node.get_and_clear_pending_events();
-	assert_eq!(events_1.len(), 1);
-	match events_1[0] {
-		Event::PendingHTLCsForwardable { .. } => { },
-		_ => panic!("Unexpected event"),
-	};
-
-	nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
-	nodes[1].node.process_pending_htlc_forwards();
+	expect_pending_htlcs_forwardable!(nodes[1]);
 	check_added_monitors!(nodes[1], 0);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -4762,7 +4711,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	// update_add update.
 	*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 	nodes[1].node.test_restore_channel_monitor();
-	check_added_monitors!(nodes[1], 2);
+	check_added_monitors!(nodes[1], 1);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	check_added_monitors!(nodes[1], 1);
 
 	let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
 	if test_ignore_second_cs {
@@ -4850,15 +4801,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 		commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
 	}
 
-	let events_5 = nodes[2].node.get_and_clear_pending_events();
-	assert_eq!(events_5.len(), 1);
-	match events_5[0] {
-		Event::PendingHTLCsForwardable { .. } => { },
-		_ => panic!("Unexpected event"),
-	};
-
-	nodes[2].node.channel_state.lock().unwrap().next_forward = Instant::now();
-	nodes[2].node.process_pending_htlc_forwards();
+	expect_pending_htlcs_forwardable!(nodes[2]);
 
 	let events_6 = nodes[2].node.get_and_clear_pending_events();
 	assert_eq!(events_6.len(), 1);
@@ -4868,15 +4811,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	};
 
 	if test_ignore_second_cs {
-		let events_7 = nodes[1].node.get_and_clear_pending_events();
-		assert_eq!(events_7.len(), 1);
-		match events_7[0] {
-			Event::PendingHTLCsForwardable { .. } => { },
-			_ => panic!("Unexpected event"),
-		};
-
-		nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
-		nodes[1].node.process_pending_htlc_forwards();
+		expect_pending_htlcs_forwardable!(nodes[1]);
 		check_added_monitors!(nodes[1], 1);
 
 		send_event = SendEvent::from_node(&nodes[1]);
@@ -4885,15 +4820,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
 	commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
 
-	let events_8 = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(events_8.len(), 1);
-	match events_8[0] {
-		Event::PendingHTLCsForwardable { .. } => { },
-		_ => panic!("Unexpected event"),
-	};
-
-	nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
-	nodes[0].node.process_pending_htlc_forwards();
+	expect_pending_htlcs_forwardable!(nodes[0]);
 
 	let events_9 = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events_9.len(), 1);
@@ -5809,6 +5736,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 	check_spends!(htlc_success_txn[1], commitment_txn[0].clone());
 
 	nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_timeout_tx] }, 200);
+	expect_pending_htlcs_forwardable!(nodes[1]);
 	let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 	assert!(htlc_updates.update_add_htlcs.is_empty());
 	assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
@@ -6047,6 +5975,7 @@ fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case:
 		expect_htlc_forward!(&nodes[2]);
 		expect_event!(&nodes[2], Event::PaymentReceived);
 		callback_node();
+		expect_pending_htlcs_forwardable!(nodes[2]);
 	}
 
 	let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
@@ -6062,7 +5991,7 @@ fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case:
 
 	// 2 => 1
 	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_msg).unwrap();
-	commitment_signed_dance!(nodes[1], nodes[2], update_2_1.commitment_signed, true, true);
+	commitment_signed_dance!(nodes[1], nodes[2], update_2_1.commitment_signed, true);
 
 	// backward fail on 1
 	let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -6329,7 +6258,11 @@ fn test_onion_failure() {
 	run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, |_| {}, || {
 		for (_, pending_forwards) in nodes[1].node.channel_state.lock().unwrap().borrow_parts().forward_htlcs.iter_mut() {
 			for f in pending_forwards.iter_mut() {
-				f.forward_info.outgoing_cltv_value += 1;
+				match f {
+					&mut HTLCForwardInfo::AddHTLC { ref mut forward_info, .. } =>
+						forward_info.outgoing_cltv_value += 1,
+					_ => {},
+				}
 			}
 		}
 	}, true, Some(18), None);
@@ -6338,7 +6271,11 @@ fn test_onion_failure() {
 		// violate amt_to_forward > msg.amount_msat
 		for (_, pending_forwards) in nodes[1].node.channel_state.lock().unwrap().borrow_parts().forward_htlcs.iter_mut() {
 			for f in pending_forwards.iter_mut() {
-				f.forward_info.amt_to_forward -= 1;
+				match f {
+					&mut HTLCForwardInfo::AddHTLC { ref mut forward_info, .. } =>
+						forward_info.amt_to_forward -= 1,
+					_ => {},
+				}
 			}
 		}
 	}, true, Some(19), None);

@@ -1,12 +1,14 @@
-use ln::channelmanager::PaymentHash;
+use ln::channelmanager::{PaymentHash, HTLCSource};
 use ln::msgs;
 use ln::router::{Route,RouteHop};
-use util::internal_traits;
+use util::{byte_utils, internal_traits};
 use util::chacha20::ChaCha20;
-use util::errors::APIError;
-use util::ser::Writeable;
+use util::errors::{self, APIError};
+use util::ser::{Readable, Writeable};
+use util::logger::Logger;
 
 use bitcoin_hashes::{Hash, HashEngine};
+use bitcoin_hashes::cmp::fixed_time_eq;
 use bitcoin_hashes::hmac::{Hmac, HmacEngine};
 use bitcoin_hashes::sha256::Hash as Sha256;
@@ -16,6 +18,8 @@ use secp256k1::ecdh::SharedSecret;
 use secp256k1;
 
 use std::ptr;
+use std::io::Cursor;
+use std::sync::Arc;
 
 pub(super) struct OnionKeys {
 	#[cfg(test)]
@@ -261,6 +265,158 @@ pub(super) fn build_first_hop_failure_packet(shared_secret: &[u8], failure_type:
 	encrypt_failure_packet(shared_secret, &failure_packet.encode()[..])
 }
 
+struct LogHolder<'a> { logger: &'a Arc<Logger> }
+/// Process failure we got back from upstream on a payment we sent (implying htlc_source is an
+/// OutboundRoute).
+/// Returns update, a boolean indicating that the payment itself failed, and the error code.
+pub(super) fn process_onion_failure<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, logger: &Arc<Logger>, htlc_source: &HTLCSource, mut packet_decrypted: Vec<u8>) -> (Option<msgs::HTLCFailChannelUpdate>, bool, Option<u16>) {
+	if let &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } = htlc_source {
+		let mut res = None;
+		let mut htlc_msat = *first_hop_htlc_msat;
+		let mut error_code_ret = None;
+		let mut next_route_hop_ix = 0;
+		let mut is_from_final_node = false;
+
+		// Handle packed channel/node updates for passing back for the route handler
+		construct_onion_keys_callback(secp_ctx, route, session_priv, |shared_secret, _, _, route_hop| {
+			next_route_hop_ix += 1;
+			if res.is_some() { return; }
+
+			let amt_to_forward = htlc_msat - route_hop.fee_msat;
+			htlc_msat = amt_to_forward;
+
+			let ammag = gen_ammag_from_shared_secret(&shared_secret[..]);
+
+			let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
+			decryption_tmp.resize(packet_decrypted.len(), 0);
+			let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
+			chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
+			packet_decrypted = decryption_tmp;
+
+			is_from_final_node = route.hops.last().unwrap().pubkey == route_hop.pubkey;
+
+			if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
+				let um = gen_um_from_shared_secret(&shared_secret[..]);
+				let mut hmac = HmacEngine::<Sha256>::new(&um);
+				hmac.input(&err_packet.encode()[32..]);
+
+				if fixed_time_eq(&Hmac::from_engine(hmac).into_inner(), &err_packet.hmac) {
+					if let Some(error_code_slice) = err_packet.failuremsg.get(0..2) {
+						const PERM: u16 = 0x4000;
+						const NODE: u16 = 0x2000;
+						const UPDATE: u16 = 0x1000;
+
+						let error_code = byte_utils::slice_to_be16(&error_code_slice);
+						error_code_ret = Some(error_code);
+
+						let (debug_field, debug_field_size) = errors::get_onion_debug_field(error_code);
+
+						// indicate that payment parameter has failed and no need to
+						// update Route object
+						let payment_failed = (match error_code & 0xff {
+							15|16|17|18|19 => true,
+							_ => false,
+						} && is_from_final_node) // PERM bit observed below even this error is from the intermediate nodes
+						|| error_code == 21; // Special case error 21 as the Route object is bogus, TODO: Maybe fail the node if the CLTV was reasonable?
+
+						let mut fail_channel_update = None;
+
+						if error_code & NODE == NODE {
+							fail_channel_update = Some(msgs::HTLCFailChannelUpdate::NodeFailure { node_id: route_hop.pubkey, is_permanent: error_code & PERM == PERM });
+						}
+						else if error_code & PERM == PERM {
+							fail_channel_update = if payment_failed {None} else {Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
+								short_channel_id: route.hops[next_route_hop_ix - if next_route_hop_ix == route.hops.len() { 1 } else { 0 }].short_channel_id,
+								is_permanent: true,
+							})};
+						}
+						else if error_code & UPDATE == UPDATE {
+							if let Some(update_len_slice) = err_packet.failuremsg.get(debug_field_size+2..debug_field_size+4) {
+								let update_len = byte_utils::slice_to_be16(&update_len_slice) as usize;
+								if let Some(update_slice) = err_packet.failuremsg.get(debug_field_size + 4..debug_field_size + 4 + update_len) {
+									if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&update_slice)) {
+										// if channel_update should NOT have caused the failure:
+										// MAY treat the channel_update as invalid.
+										let is_chan_update_invalid = match error_code & 0xff {
+											7 => false,
+											11 => amt_to_forward > chan_update.contents.htlc_minimum_msat,
+											12 => {
+												let new_fee = amt_to_forward.checked_mul(chan_update.contents.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan_update.contents.fee_base_msat as u64) });
+												new_fee.is_some() && route_hop.fee_msat >= new_fee.unwrap()
+											}
+											13 => route_hop.cltv_expiry_delta as u16 >= chan_update.contents.cltv_expiry_delta,
+											14 => false, // expiry_too_soon; always valid?
+											20 => chan_update.contents.flags & 2 == 0,
+											_ => false, // unknown error code; take channel_update as valid
+										};
+										fail_channel_update = if is_chan_update_invalid {
+											// This probably indicates the node which forwarded
+											// to the node in question corrupted something.
+											Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
+												short_channel_id: route_hop.short_channel_id,
+												is_permanent: true,
+											})
+										} else {
+											Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage {
+												msg: chan_update,
+											})
+										};
+									}
+								}
+							}
+							if fail_channel_update.is_none() {
+								// They provided an UPDATE which was obviously bogus, not worth
+								// trying to relay through them anymore.
+								fail_channel_update = Some(msgs::HTLCFailChannelUpdate::NodeFailure {
+									node_id: route_hop.pubkey,
+									is_permanent: true,
+								});
+							}
+						} else if !payment_failed {
+							// We can't understand their error messages and they failed to
+							// forward...they probably can't understand our forwards so its
+							// really not worth trying any further.
+							fail_channel_update = Some(msgs::HTLCFailChannelUpdate::NodeFailure {
+								node_id: route_hop.pubkey,
+								is_permanent: true,
+							});
+						}
+
+						// TODO: Here (and a few other places) we assume that BADONION errors
+						// are always "sourced" from the node previous to the one which failed
+						// to decode the onion.
+						res = Some((fail_channel_update, !(error_code & PERM == PERM && is_from_final_node)));
+
+						let (description, title) = errors::get_onion_error_description(error_code);
+						if debug_field_size > 0 && err_packet.failuremsg.len() >= 4 + debug_field_size {
+							let log_holder = LogHolder { logger };
+							log_warn!(log_holder, "Onion Error[{}({:#x}) {}({})] {}", title, error_code, debug_field, log_bytes!(&err_packet.failuremsg[4..4+debug_field_size]), description);
+						}
+						else {
+							let log_holder = LogHolder { logger };
+							log_warn!(log_holder, "Onion Error[{}({:#x})] {}", title, error_code, description);
+						}
+					} else {
+						// Useless packet that we can't use but it passed HMAC, so it
+						// definitely came from the peer in question
+						res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
+							node_id: route_hop.pubkey,
+							is_permanent: true,
+						}), !is_from_final_node));
+					}
+				}
+			}
+		}).expect("Route that we sent via spontaneously grew invalid keys in the middle of it?");
+		if let Some((channel_update, payment_retryable)) = res {
+			(channel_update, payment_retryable, error_code_ret)
+		} else {
+			// only not set either packet unparseable or hmac does not match with any
+			// payment not retryable only when garbage is from the final node
+			(None, !is_from_final_node, None)
+		}
+	} else { unreachable!(); }
+}
+
 #[cfg(test)]
 mod tests {
 	use ln::channelmanager::PaymentHash;
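For reference, the PERM/NODE/UPDATE constants in the function above are the BOLT 4 failure-code flag bits, the same bits behind the 0x4000 | 10 and 0x1000 | 7 codes pushed into failed_forwards earlier in this diff. A hypothetical helper (not part of the diff) showing the decomposition:

    // Split a BOLT 4 failure code into (permanent, node-level, has-channel-update)
    // flags plus the low "specific failure" byte, mirroring the checks above.
    fn decompose_failure_code(code: u16) -> (bool, bool, bool, u8) {
        const PERM: u16 = 0x4000;
        const NODE: u16 = 0x2000;
        const UPDATE: u16 = 0x1000;
        (code & PERM != 0, code & NODE != 0, code & UPDATE != 0, (code & 0xff) as u8)
    }

decompose_failure_code(0x4000 | 10) yields (true, false, false, 10), i.e. a permanent failure with specific code 10.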