Merge pull request #434 from TheBlueMatt/2019-12-var-len-onion
TLV-based Variable Length Onion
commit 3c9e8c9d77
13 changed files with 977 additions and 184 deletions
@@ -34,8 +34,8 @@ GEN_TEST NodeAnnouncement test_msg_exact ""
 GEN_TEST UpdateAddHTLC test_msg_hole ", 85, 33"
 GEN_TEST ErrorMessage test_msg_hole ", 32, 2"
-GEN_TEST OnionHopData test_msg_hole ", 1+8+8+4, 12"
 
 GEN_TEST Init test_msg_simple ""
+GEN_TEST OnionHopData test_msg_simple ""
 GEN_TEST Ping test_msg_simple ""
 GEN_TEST Pong test_msg_simple ""

@@ -20,7 +20,7 @@ pub mod msg_channel_update;
 pub mod msg_node_announcement;
 pub mod msg_update_add_htlc;
 pub mod msg_error_message;
-pub mod msg_onion_hop_data;
 pub mod msg_init;
+pub mod msg_onion_hop_data;
 pub mod msg_ping;
 pub mod msg_pong;

@@ -7,7 +7,7 @@ use msg_targets::utils::VecWriter;
 
 #[inline]
 pub fn do_test(data: &[u8]) {
-	test_msg_hole!(msgs::OnionHopData, data, 1+8+8+4, 12);
+	test_msg_simple!(msgs::OnionHopData, data);
 }
 
 #[no_mangle]

@@ -13,6 +13,15 @@ impl Writer for VecWriter {
 	}
 }
 
+// We attempt to test the strictest behavior we can for a given message, however, some messages
+// have different expected behavior. You can see which messages have which behavior in
+// gen_target.sh, but, in general, the *_announcement messages have to round-trip exactly (as
+// otherwise we'd invalidate the signatures), most messages just need to round-trip up to the
+// amount of data we know how to interpret, and some messages we may throw out invalid stuff (e.g.
+// if an error message isn't valid UTF-8 we can't String-ize it), so they won't round-trip correctly.
+
+// Tests a message that must survive roundtrip exactly, though may not empty the read buffer
+// entirely
 #[macro_export]
 macro_rules! test_msg {
 	($MsgType: path, $data: ident) => {

@@ -31,6 +40,8 @@ macro_rules! test_msg {
 	}
 }
 
+// Tests a message that may lose data on roundtrip, but shouldn't lose data compared to our
+// re-serialization.
 #[macro_export]
 macro_rules! test_msg_simple {
 	($MsgType: path, $data: ident) => {

@@ -40,11 +51,18 @@ macro_rules! test_msg_simple {
 		if let Ok(msg) = <$MsgType as Readable<::std::io::Cursor<&[u8]>>>::read(&mut r) {
 			let mut w = VecWriter(Vec::new());
 			msg.write(&mut w).unwrap();
+
+			let msg = <$MsgType as Readable<::std::io::Cursor<&[u8]>>>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap();
+			let mut w_two = VecWriter(Vec::new());
+			msg.write(&mut w_two).unwrap();
+			assert_eq!(&w.0[..], &w_two.0[..]);
 		}
 	}
 }
 
+// Tests a message that must survive roundtrip exactly, and must exactly empty the read buffer and
+// split it back out on re-serialization.
 #[macro_export]
 macro_rules! test_msg_exact {
 	($MsgType: path, $data: ident) => {

@@ -54,13 +72,14 @@ macro_rules! test_msg_exact {
 		if let Ok(msg) = <$MsgType as Readable<::std::io::Cursor<&[u8]>>>::read(&mut r) {
 			let mut w = VecWriter(Vec::new());
 			msg.write(&mut w).unwrap();
+
 			assert_eq!(&r.into_inner()[..], &w.0[..]);
 		}
 	}
 }
 
+// Tests a message that must survive roundtrip exactly, modulo one "hole" which may be set to 0s on
+// re-serialization.
 #[macro_export]
 macro_rules! test_msg_hole {
 	($MsgType: path, $data: ident, $hole: expr, $hole_len: expr) => {

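A toy illustration (not part of the commit; the codec here is an invented stand-in for a real lightning message) of the "simple" round-trip property above: re-serializing what we read back from our own output must reproduce that output byte-for-byte, even when trailing input bytes were ignored.

fn read_msg(data: &[u8]) -> Option<u32> {
    // Interpret only the first four bytes, big-endian, ignoring any trailing data.
    data.get(0..4).map(|b| u32::from_be_bytes([b[0], b[1], b[2], b[3]]))
}
fn write_msg(msg: u32) -> Vec<u8> { msg.to_be_bytes().to_vec() }

fn main() {
    let data = [0, 0, 0, 7, 0xde, 0xad]; // two trailing bytes we don't interpret
    let msg = read_msg(&data).unwrap();
    let w = write_msg(msg);
    let msg2 = read_msg(&w).unwrap();
    assert_eq!(write_msg(msg2), w); // round-trips up to the data we understand
}
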
@@ -38,21 +38,19 @@ use chain::keysinterface::{ChannelKeys, KeysInterface, InMemoryChannelKeys};
 use util::config::UserConfig;
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, Writeable, Writer};
-use util::chacha20::ChaCha20;
+use util::chacha20::{ChaCha20, ChaChaReader};
 use util::logger::Logger;
 use util::errors::APIError;
 
 use std::{cmp, mem};
 use std::collections::{HashMap, hash_map, HashSet};
-use std::io::Cursor;
+use std::io::{Cursor, Read};
 use std::sync::{Arc, Mutex, MutexGuard, RwLock};
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::time::Duration;
 use std::marker::{Sync, Send};
 use std::ops::Deref;
 
-const SIXTY_FIVE_ZEROS: [u8; 65] = [0; 65];
-
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
 // Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should

@@ -906,22 +904,30 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
 		}
 
 		let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
-		let next_hop_data = {
-			let mut decoded = [0; 65];
-			chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
-			match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) {
+		let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) };
+		let (next_hop_data, next_hop_hmac) = {
+			match msgs::OnionHopData::read(&mut chacha_stream) {
 				Err(err) => {
 					let error_code = match err {
 						msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
+						msgs::DecodeError::UnknownRequiredFeature|
+						msgs::DecodeError::InvalidValue|
+						msgs::DecodeError::ShortRead => 0x4000 | 22, // invalid_onion_payload
 						_ => 0x2000 | 2, // Should never happen
 					};
 					return_err!("Unable to decode our hop data", error_code, &[0;0]);
 				},
-				Ok(msg) => msg
+				Ok(msg) => {
+					let mut hmac = [0; 32];
+					if let Err(_) = chacha_stream.read_exact(&mut hmac[..]) {
+						return_err!("Unable to decode hop data", 0x4000 | 22, &[0;0]);
+					}
+					(msg, hmac)
+				},
 			}
 		};
 
-		let pending_forward_info = if next_hop_data.hmac == [0; 32] {
+		let pending_forward_info = if next_hop_hmac == [0; 32] {
 			#[cfg(test)]
 			{
 				// In tests, make sure that the initial onion packet data is, at least, non-0.

@@ -931,10 +937,11 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
 				// as-is (and were originally 0s).
 				// Of course reverse path calculation is still pretty easy given naive routing
 				// algorithms, but this fixes the most-obvious case.
-				let mut new_packet_data = [0; 19*65];
-				chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
-				assert_ne!(new_packet_data[0..65], [0; 65][..]);
-				assert_ne!(new_packet_data[..], [0; 19*65][..]);
+				let mut next_bytes = [0; 32];
+				chacha_stream.read_exact(&mut next_bytes).unwrap();
+				assert_ne!(next_bytes[..], [0; 32][..]);
+				chacha_stream.read_exact(&mut next_bytes).unwrap();
+				assert_ne!(next_bytes[..], [0; 32][..]);
 			}
 
 			// OUR PAYMENT!

@@ -943,11 +950,11 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
 				return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
 			}
 			// final_incorrect_htlc_amount
-			if next_hop_data.data.amt_to_forward > msg.amount_msat {
+			if next_hop_data.amt_to_forward > msg.amount_msat {
 				return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
 			}
 			// final_incorrect_cltv_expiry
-			if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
+			if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
 				return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
 			}
 

@@ -961,13 +968,24 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
 				payment_hash: msg.payment_hash.clone(),
 				short_channel_id: 0,
 				incoming_shared_secret: shared_secret,
-				amt_to_forward: next_hop_data.data.amt_to_forward,
-				outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
+				amt_to_forward: next_hop_data.amt_to_forward,
+				outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
 			})
 		} else {
 			let mut new_packet_data = [0; 20*65];
-			chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
-			chacha.process(&SIXTY_FIVE_ZEROS[..], &mut new_packet_data[19*65..]);
+			let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
+			#[cfg(debug_assertions)]
+			{
+				// Check two things:
+				// a) that the behavior of our stream here will return Ok(0) even if the TLV
+				//    read above emptied out our buffer and the unwrap() won't needlessly panic
+				// b) that we didn't somehow magically end up with extra data.
+				let mut t = [0; 1];
+				debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
+			}
+			// Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
+			// fill the onion hop data we'll forward to our next-hop peer.
+			chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
 
 			let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
 

@@ -986,16 +1004,24 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
 				version: 0,
 				public_key,
 				hop_data: new_packet_data,
-				hmac: next_hop_data.hmac.clone(),
+				hmac: next_hop_hmac.clone(),
 			};
 
+			let short_channel_id = match next_hop_data.format {
+				msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
+				msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
+				msgs::OnionHopDataFormat::FinalNode => {
+					return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
+				},
+			};
+
 			PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
 				onion_packet: Some(outgoing_packet),
 				payment_hash: msg.payment_hash.clone(),
-				short_channel_id: next_hop_data.data.short_channel_id,
+				short_channel_id: short_channel_id,
 				incoming_shared_secret: shared_secret,
-				amt_to_forward: next_hop_data.data.amt_to_forward,
-				outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
+				amt_to_forward: next_hop_data.amt_to_forward,
+				outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
 			})
 		};
 

@@ -1137,6 +1163,9 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
 		let onion_keys = secp_call!(onion_utils::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
 				APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
 		let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height)?;
+		if onion_utils::route_size_insane(&onion_payloads) {
+			return Err(APIError::RouteError{err: "Route size too large considering onion data"});
+		}
 		let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, &payment_hash);
 
 		let _ = self.total_consistency_lock.read().unwrap();

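For reference (this is BOLT 4 background, not part of the diff), the u16 failure codes combined above are flag bits plus a reason number: 0x8000 is BADONION, 0x4000 is PERM, 0x2000 is NODE, 0x1000 is UPDATE. A quick sanity check of the codes used in this hunk:

const PERM: u16 = 0x4000;
const NODE: u16 = 0x2000;

fn main() {
    assert_eq!(PERM | 1, 0x4001);  // invalid_realm: unknown version/realm byte
    assert_eq!(PERM | 22, 0x4016); // invalid_onion_payload: undecodable TLV hop data
    assert_eq!(NODE | 2, 0x2002);  // temporary_node_failure: the "should never happen" arm
}
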
@@ -29,6 +29,10 @@ mod sealed { // You should just use the type aliases instead.
 		pub trait UpfrontShutdownScript: Context {}
 		impl UpfrontShutdownScript for InitContext {}
 		impl UpfrontShutdownScript for NodeContext {}
+
+		pub trait VariableLengthOnion: Context {}
+		impl VariableLengthOnion for InitContext {}
+		impl VariableLengthOnion for NodeContext {}
 	}
 
 /// Tracks the set of features which a node implements, templated by the context in which it

@@ -69,7 +73,7 @@ impl InitFeatures {
 	/// Create a Features with the features we support
 	pub fn supported() -> InitFeatures {
 		InitFeatures {
-			flags: vec![2 | 1 << 5],
+			flags: vec![2 | 1 << 5, 1 << (9-8)],
 			mark: PhantomData,
 		}
 	}

@@ -132,14 +136,14 @@ impl NodeFeatures {
 	#[cfg(not(feature = "fuzztarget"))]
 	pub(crate) fn supported() -> NodeFeatures {
 		NodeFeatures {
-			flags: vec![2 | 1 << 5],
+			flags: vec![2 | 1 << 5, 1 << (9-8)],
 			mark: PhantomData,
 		}
 	}
 	#[cfg(feature = "fuzztarget")]
 	pub fn supported() -> NodeFeatures {
 		NodeFeatures {
-			flags: vec![2 | 1 << 5],
+			flags: vec![2 | 1 << 5, 1 << (9-8)],
 			mark: PhantomData,
 		}
 	}

@@ -182,13 +186,21 @@ impl<T: sealed::Context> Features<T> {
 
 	pub(crate) fn requires_unknown_bits(&self) -> bool {
 		self.flags.iter().enumerate().any(|(idx, &byte)| {
-			( idx != 0 && (byte & 0x55) != 0 ) || ( idx == 0 && (byte & 0x14) != 0 )
+			(match idx {
+				0 => (byte & 0b00010100),
+				1 => (byte & 0b01010100),
+				_ => (byte & 0b01010101),
+			}) != 0
 		})
 	}
 
 	pub(crate) fn supports_unknown_bits(&self) -> bool {
 		self.flags.iter().enumerate().any(|(idx, &byte)| {
-			( idx != 0 && byte != 0 ) || ( idx == 0 && (byte & 0xc4) != 0 )
+			(match idx {
+				0 => (byte & 0b11000100),
+				1 => (byte & 0b11111100),
+				_ => byte,
+			}) != 0
 		})
 	}
 

@@ -232,6 +244,12 @@ impl<T: sealed::UpfrontShutdownScript> Features<T> {
 	}
 }
 
+impl<T: sealed::VariableLengthOnion> Features<T> {
+	pub(crate) fn supports_variable_length_onion(&self) -> bool {
+		self.flags.len() > 1 && (self.flags[1] & 3) != 0
+	}
+}
+
 impl<T: sealed::InitialRoutingSync> Features<T> {
 	pub(crate) fn initial_routing_sync(&self) -> bool {
 		self.flags.len() > 0 && (self.flags[0] & (1 << 3)) != 0

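To see why pushing `1 << (9-8)` into a second flags byte advertises the optional variable_length_onion feature (bit 9), here is a small sketch of the byte/bit arithmetic this internal flags layout uses; supports_bit is an illustrative helper, not a library function:

fn supports_bit(flags: &[u8], bit: usize) -> bool {
    // Bit N lives in flags[N / 8] at mask 1 << (N % 8) in this layout.
    flags.len() > bit / 8 && (flags[bit / 8] & (1 << (bit % 8))) != 0
}

fn main() {
    let flags: Vec<u8> = vec![2 | 1 << 5, 1 << (9 - 8)];
    assert!(supports_bit(&flags, 1));  // data_loss_protect (optional)
    assert!(supports_bit(&flags, 5));  // upfront_shutdown_script (optional)
    assert!(supports_bit(&flags, 9));  // variable_length_onion (optional)
    assert!(!supports_bit(&flags, 8)); // the required variant is deliberately unset
}

supports_variable_length_onion() above checks `flags[1] & 3`, i.e., either the required (bit 8) or optional (bit 9) variant.
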
@@ -18,7 +18,7 @@ use util::enforcing_trait_impls::EnforcingChannelKeys;
 use util::test_utils;
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::errors::APIError;
-use util::ser::{Writeable, ReadableArgs};
+use util::ser::{Writeable, Writer, ReadableArgs};
 use util::config::UserConfig;
 use util::logger::Logger;

@@ -44,7 +44,7 @@ use std::collections::{BTreeSet, HashMap, HashSet};
 use std::default::Default;
 use std::sync::{Arc, Mutex};
 use std::sync::atomic::Ordering;
-use std::mem;
+use std::{mem, io};
 
 use rand::{thread_rng, Rng};
 

@@ -5007,6 +5007,20 @@ impl msgs::ChannelUpdate {
 	}
 }
 
+struct BogusOnionHopData {
+	data: Vec<u8>
+}
+impl BogusOnionHopData {
+	fn new(orig: msgs::OnionHopData) -> Self {
+		Self { data: orig.encode() }
+	}
+}
+impl Writeable for BogusOnionHopData {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+		writer.write_all(&self.data[..])
+	}
+}
+
 #[test]
 fn test_onion_failure() {
 	use ln::msgs::ChannelUpdate;

@@ -5036,9 +5050,15 @@ fn test_onion_failure() {
 		let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
 		let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 		let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
-		onion_payloads[0].realm = 3;
-		msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
-	}, ||{}, true, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));//XXX incremented channels idx here
+		let mut new_payloads = Vec::new();
+		for payload in onion_payloads.drain(..) {
+			new_payloads.push(BogusOnionHopData::new(payload));
+		}
+		// break the first (non-final) hop payload by swapping the realm (0) byte for a byte
+		// describing a length-1 TLV payload, which is obviously bogus.
+		new_payloads[0].data[0] = 1;
+		msg.onion_routing_packet = onion_utils::construct_onion_packet_bogus_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash);
+	}, ||{}, true, Some(PERM|22), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));//XXX incremented channels idx here
 
 	// final node failure
 	run_onion_failure_test("invalid_realm", 3, &nodes, &route, &payment_hash, |msg| {

@@ -5046,9 +5066,15 @@ fn test_onion_failure() {
 		let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
 		let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
 		let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
-		onion_payloads[1].realm = 3;
-		msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash);
-	}, ||{}, false, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));
+		let mut new_payloads = Vec::new();
+		for payload in onion_payloads.drain(..) {
+			new_payloads.push(BogusOnionHopData::new(payload));
+		}
+		// break the last-hop payload by swapping the realm (0) byte for a byte describing a
+		// length-1 TLV payload, which is obviously bogus.
+		new_payloads[1].data[0] = 1;
+		msg.onion_routing_packet = onion_utils::construct_onion_packet_bogus_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash);
+	}, ||{}, false, Some(PERM|22), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));
 
 	// the following three with run_onion_failure_test_with_fail_intercept() test only the origin node
 	// receiving simulated fail messages

@@ -29,7 +29,7 @@ use std::io::Read;
 use std::result::Result;
 
 use util::events;
-use util::ser::{Readable, Writeable, Writer};
+use util::ser::{Readable, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt};
 
 use ln::channelmanager::{PaymentPreimage, PaymentHash};
 

@@ -39,10 +39,11 @@ pub enum DecodeError {
 	/// A version byte specified something we don't know how to handle.
+	/// Includes unknown realm byte in an OnionHopData packet
 	UnknownVersion,
-	/// Unknown feature mandating we fail to parse message
+	/// Unknown feature mandating we fail to parse message (e.g., TLV with an even, unknown type)
 	UnknownRequiredFeature,
 	/// Value was invalid, eg a byte which was supposed to be a bool was something other than a 0
-	/// or 1, a public key/private key/signature was invalid, text wasn't UTF-8, etc
+	/// or 1, a public key/private key/signature was invalid, text wasn't UTF-8, TLV was
+	/// syntactically incorrect, etc.
 	InvalidValue,
 	/// Buffer too short
 	ShortRead,

@@ -608,22 +609,25 @@ pub trait RoutingMessageHandler : Send + Sync {
 	fn should_request_full_sync(&self, node_id: &PublicKey) -> bool;
 }
 
-pub(crate) struct OnionRealm0HopData {
-	pub(crate) short_channel_id: u64,
-	pub(crate) amt_to_forward: u64,
-	pub(crate) outgoing_cltv_value: u32,
-	// 12 bytes of 0-padding
-}
-
 mod fuzzy_internal_msgs {
 	// These types aren't intended to be pub, but are exposed for direct fuzzing (as we deserialize
 	// them from untrusted input):
 
-	use super::OnionRealm0HopData;
+	pub(crate) enum OnionHopDataFormat {
+		Legacy { // aka Realm-0
+			short_channel_id: u64,
+		},
+		NonFinalNode {
+			short_channel_id: u64,
+		},
+		FinalNode,
+	}
+
 	pub struct OnionHopData {
-		pub(crate) realm: u8,
-		pub(crate) data: OnionRealm0HopData,
-		pub(crate) hmac: [u8; 32],
+		pub(crate) format: OnionHopDataFormat,
+		pub(crate) amt_to_forward: u64,
+		pub(crate) outgoing_cltv_value: u32,
+		// 12 bytes of 0-padding for Legacy format
 	}
 
 	pub struct DecodedOnionErrorPacket {

@@ -960,53 +964,78 @@ impl_writeable!(UpdateAddHTLC, 32+8+8+32+4+1366, {
 	onion_routing_packet
 });
 
-impl Writeable for OnionRealm0HopData {
-	fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
-		w.size_hint(32);
-		self.short_channel_id.write(w)?;
-		self.amt_to_forward.write(w)?;
-		self.outgoing_cltv_value.write(w)?;
-		w.write_all(&[0;12])?;
-		Ok(())
-	}
-}
-
-impl<R: Read> Readable<R> for OnionRealm0HopData {
-	fn read(r: &mut R) -> Result<Self, DecodeError> {
-		Ok(OnionRealm0HopData {
-			short_channel_id: Readable::read(r)?,
-			amt_to_forward: Readable::read(r)?,
-			outgoing_cltv_value: {
-				let v: u32 = Readable::read(r)?;
-				r.read_exact(&mut [0; 12])?;
-				v
-			}
-		})
-	}
-}
-
 impl Writeable for OnionHopData {
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
-		w.size_hint(65);
-		self.realm.write(w)?;
-		self.data.write(w)?;
-		self.hmac.write(w)?;
+		w.size_hint(33);
+		match self.format {
+			OnionHopDataFormat::Legacy { short_channel_id } => {
+				0u8.write(w)?;
+				short_channel_id.write(w)?;
+				self.amt_to_forward.write(w)?;
+				self.outgoing_cltv_value.write(w)?;
+				w.write_all(&[0;12])?;
+			},
+			OnionHopDataFormat::NonFinalNode { short_channel_id } => {
+				encode_varint_length_prefixed_tlv!(w, {
+					(2, HighZeroBytesDroppedVarInt(self.amt_to_forward)),
+					(4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value)),
+					(6, short_channel_id)
+				});
+			},
+			OnionHopDataFormat::FinalNode => {
+				encode_varint_length_prefixed_tlv!(w, {
+					(2, HighZeroBytesDroppedVarInt(self.amt_to_forward)),
+					(4, HighZeroBytesDroppedVarInt(self.outgoing_cltv_value))
+				});
+			},
+		}
 		Ok(())
 	}
 }
 
 impl<R: Read> Readable<R> for OnionHopData {
-	fn read(r: &mut R) -> Result<Self, DecodeError> {
-		Ok(OnionHopData {
-			realm: {
-				let r: u8 = Readable::read(r)?;
-				if r != 0 {
-					return Err(DecodeError::UnknownVersion);
-				}
-				r
-			},
-			data: Readable::read(r)?,
-			hmac: Readable::read(r)?,
-		})
+	fn read(mut r: &mut R) -> Result<Self, DecodeError> {
+		use bitcoin::consensus::encode::{Decodable, Error, VarInt};
+		let v: VarInt = Decodable::consensus_decode(&mut r)
+			.map_err(|e| match e {
+				Error::Io(ioe) => DecodeError::from(ioe),
+				_ => DecodeError::InvalidValue
+			})?;
+		const LEGACY_ONION_HOP_FLAG: u64 = 0;
+		let (format, amt, cltv_value) = if v.0 != LEGACY_ONION_HOP_FLAG {
+			let mut rd = FixedLengthReader::new(r, v.0);
+			let mut amt = HighZeroBytesDroppedVarInt(0u64);
+			let mut cltv_value = HighZeroBytesDroppedVarInt(0u32);
+			let mut short_id: Option<u64> = None;
+			decode_tlv!(&mut rd, {
+				(2, amt),
+				(4, cltv_value)
+			}, {
+				(6, short_id)
+			});
+			rd.eat_remaining().map_err(|_| DecodeError::ShortRead)?;
+			let format = if let Some(short_channel_id) = short_id {
+				OnionHopDataFormat::NonFinalNode {
+					short_channel_id,
+				}
+			} else {
+				OnionHopDataFormat::FinalNode
+			};
+			(format, amt.0, cltv_value.0)
+		} else {
+			let format = OnionHopDataFormat::Legacy {
+				short_channel_id: Readable::read(r)?,
+			};
+			let amt: u64 = Readable::read(r)?;
+			let cltv_value: u32 = Readable::read(r)?;
+			r.read_exact(&mut [0; 12])?;
+			(format, amt, cltv_value)
+		};
+
+		Ok(OnionHopData {
+			format,
+			amt_to_forward: amt,
+			outgoing_cltv_value: cltv_value,
+		})
 	}
 }
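A minimal sketch of the length-prefixed TLV framing the new reader handles — hand-rolled here rather than using the library's decode_tlv!/FixedLengthReader machinery, and assuming single-byte type/length values for brevity: read a type, a length, then exactly `length` value bytes, rejecting unknown even ("it's OK to be odd") types:

use std::io::Read;

fn walk_tlv(stream: &mut impl Read, total_len: u64) -> Result<(), &'static str> {
    let mut consumed = 0u64;
    while consumed < total_len {
        let mut tl = [0u8; 2]; // single-byte BigSize type and length
        stream.read_exact(&mut tl).map_err(|_| "short read")?;
        let (typ, len) = (tl[0] as u64, tl[1] as u64);
        let mut value = vec![0u8; len as usize];
        stream.read_exact(&mut value).map_err(|_| "short read")?;
        if typ % 2 == 0 && ![2u64, 4, 6].contains(&typ) {
            return Err("unknown required (even) type");
        }
        consumed += 2 + len;
    }
    Ok(())
}

fn main() {
    // The hop payload from the test vectors below: amt (2), cltv (4), scid (6).
    let payload = hex_literal(b"02080badf00d010203040404ffffffff0608deadbeef1bad1dea");
    let mut cursor = std::io::Cursor::new(&payload[..]);
    assert!(walk_tlv(&mut cursor, payload.len() as u64).is_ok());
}

// Tiny helper to keep this sketch dependency-free.
fn hex_literal(s: &[u8]) -> Vec<u8> {
    s.chunks(2).map(|p| {
        let hi = (p[0] as char).to_digit(16).unwrap() as u8;
        let lo = (p[1] as char).to_digit(16).unwrap() as u8;
        hi * 16 + lo
    }).collect()
}
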
@@ -1292,9 +1321,9 @@ impl_writeable_len_match!(NodeAnnouncement, {
 mod tests {
 	use hex;
 	use ln::msgs;
-	use ln::msgs::{ChannelFeatures, InitFeatures, NodeFeatures, OptionalField, OnionErrorPacket};
+	use ln::msgs::{ChannelFeatures, InitFeatures, NodeFeatures, OptionalField, OnionErrorPacket, OnionHopDataFormat};
 	use ln::channelmanager::{PaymentPreimage, PaymentHash};
-	use util::ser::Writeable;
+	use util::ser::{Writeable, Readable};
 
 	use bitcoin_hashes::sha256d::Hash as Sha256dHash;
 	use bitcoin_hashes::hex::FromHex;

@@ -1306,6 +1335,8 @@ mod tests {
 	use secp256k1::key::{PublicKey,SecretKey};
 	use secp256k1::{Secp256k1, Message};
 
+	use std::io::Cursor;
+
 	#[test]
 	fn encoding_channel_reestablish_no_secret() {
 		let cr = msgs::ChannelReestablish {

@@ -1945,4 +1976,54 @@ mod tests {
 		let target_value = hex::decode("004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();
 		assert_eq!(encoded_value, target_value);
 	}
+
+	#[test]
+	fn encoding_legacy_onion_hop_data() {
+		let msg = msgs::OnionHopData {
+			format: OnionHopDataFormat::Legacy {
+				short_channel_id: 0xdeadbeef1bad1dea,
+			},
+			amt_to_forward: 0x0badf00d01020304,
+			outgoing_cltv_value: 0xffffffff,
+		};
+		let encoded_value = msg.encode();
+		let target_value = hex::decode("00deadbeef1bad1dea0badf00d01020304ffffffff000000000000000000000000").unwrap();
+		assert_eq!(encoded_value, target_value);
+	}
+
+	#[test]
+	fn encoding_nonfinal_onion_hop_data() {
+		let mut msg = msgs::OnionHopData {
+			format: OnionHopDataFormat::NonFinalNode {
+				short_channel_id: 0xdeadbeef1bad1dea,
+			},
+			amt_to_forward: 0x0badf00d01020304,
+			outgoing_cltv_value: 0xffffffff,
+		};
+		let encoded_value = msg.encode();
+		let target_value = hex::decode("1a02080badf00d010203040404ffffffff0608deadbeef1bad1dea").unwrap();
+		assert_eq!(encoded_value, target_value);
+		msg = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
+		if let OnionHopDataFormat::NonFinalNode { short_channel_id } = msg.format {
+			assert_eq!(short_channel_id, 0xdeadbeef1bad1dea);
+		} else { panic!(); }
+		assert_eq!(msg.amt_to_forward, 0x0badf00d01020304);
+		assert_eq!(msg.outgoing_cltv_value, 0xffffffff);
+	}
+
+	#[test]
+	fn encoding_final_onion_hop_data() {
+		let mut msg = msgs::OnionHopData {
+			format: OnionHopDataFormat::FinalNode,
+			amt_to_forward: 0x0badf00d01020304,
+			outgoing_cltv_value: 0xffffffff,
+		};
+		let encoded_value = msg.encode();
+		let target_value = hex::decode("1002080badf00d010203040404ffffffff").unwrap();
+		assert_eq!(encoded_value, target_value);
+		msg = Readable::read(&mut Cursor::new(&target_value[..])).unwrap();
+		if let OnionHopDataFormat::FinalNode = msg.format { } else { panic!(); }
+		assert_eq!(msg.amt_to_forward, 0x0badf00d01020304);
+		assert_eq!(msg.outgoing_cltv_value, 0xffffffff);
+	}
 }

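Reading the nonfinal vector byte-by-byte makes the TLV framing concrete: `1a` is the BigSize total length (26 bytes), followed by three records — `02 08 0badf00d01020304` (type 2, amt_to_forward), `04 04 ffffffff` (type 4, outgoing_cltv_value) and `06 08 deadbeef1bad1dea` (type 6, short_channel_id). The final-node vector is the same minus the type-6 record, hence its shorter `10` (16-byte) length prefix.
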
@@ -4,7 +4,7 @@ use ln::router::{Route,RouteHop};
 use util::byte_utils;
 use util::chacha20::ChaCha20;
 use util::errors::{self, APIError};
-use util::ser::{Readable, Writeable};
+use util::ser::{Readable, Writeable, LengthCalculatingWriter};
 use util::logger::{Logger, LogHolder};
 
 use bitcoin_hashes::{Hash, HashEngine};

@@ -114,20 +114,28 @@ pub(super) fn build_onion_payloads(route: &Route, starting_htlc_offset: u32) ->
 	let mut last_short_channel_id = 0;
 	let mut res: Vec<msgs::OnionHopData> = Vec::with_capacity(route.hops.len());
 
-	for hop in route.hops.iter().rev() {
+	for (idx, hop) in route.hops.iter().rev().enumerate() {
 		// First hop gets special values so that it can check, on receipt, that everything is
 		// exactly as it should be (and the next hop isn't trying to probe to find out if we're
 		// the intended recipient).
 		let value_msat = if cur_value_msat == 0 { hop.fee_msat } else { cur_value_msat };
 		let cltv = if cur_cltv == starting_htlc_offset { hop.cltv_expiry_delta + starting_htlc_offset } else { cur_cltv };
 		res.insert(0, msgs::OnionHopData {
-			realm: 0,
-			data: msgs::OnionRealm0HopData {
-				short_channel_id: last_short_channel_id,
-				amt_to_forward: value_msat,
-				outgoing_cltv_value: cltv,
+			format: if hop.node_features.supports_variable_length_onion() {
+				if idx == 0 {
+					msgs::OnionHopDataFormat::FinalNode
+				} else {
+					msgs::OnionHopDataFormat::NonFinalNode {
+						short_channel_id: last_short_channel_id,
+					}
+				}
+			} else {
+				msgs::OnionHopDataFormat::Legacy {
+					short_channel_id: last_short_channel_id,
+				}
 			},
-			hmac: [0; 32],
+			amt_to_forward: value_msat,
+			outgoing_cltv_value: cltv,
 		});
 		cur_value_msat += hop.fee_msat;
 		if cur_value_msat >= 21000000 * 100000000 * 1000 {
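A worked example (with invented numbers) of the reverse iteration above — payloads are built from the recipient backwards, so idx == 0 is the final hop and the HTLC amount grows by each intermediate hop's fee as we move toward the sender:

fn main() {
    // Route A -> B -> C delivering 1000 msat to C, with B charging a 10 msat fee.
    let recipient_amt_msat = 1000u64;
    let b_fee_msat = 10u64;
    // idx == 0 processes C (the final hop): its payload carries amt_to_forward = 1000,
    // and B's payload also carries 1000 (the amount B must forward onward to C).
    let payload_amt = recipient_amt_msat;
    // The HTLC A actually offers B grows by B's fee:
    assert_eq!(payload_amt + b_fee_msat, 1010);
}
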
@@ -142,68 +150,96 @@ pub(super) fn build_onion_payloads(route: &Route, starting_htlc_offset: u32) ->
 	Ok((res, cur_value_msat, cur_cltv))
 }
 
+/// Length of the onion data packet. Before TLV-based onions this was 20 65-byte hops, though now
+/// the hops can be of variable length.
+pub(crate) const ONION_DATA_LEN: usize = 20*65;
+
 #[inline]
-fn shift_arr_right(arr: &mut [u8; 20*65]) {
-	for i in (65..20*65).rev() {
-		arr[i] = arr[i-65];
+fn shift_arr_right(arr: &mut [u8; ONION_DATA_LEN], amt: usize) {
+	for i in (amt..ONION_DATA_LEN).rev() {
+		arr[i] = arr[i-amt];
 	}
-	for i in 0..65 {
+	for i in 0..amt {
 		arr[i] = 0;
 	}
 }
 
-#[inline]
-fn xor_bufs(dst: &mut[u8], src: &[u8]) {
-	assert_eq!(dst.len(), src.len());
-
-	for i in 0..dst.len() {
-		dst[i] ^= src[i];
+pub(super) fn route_size_insane(payloads: &Vec<msgs::OnionHopData>) -> bool {
+	let mut len = 0;
+	for payload in payloads.iter() {
+		let mut payload_len = LengthCalculatingWriter(0);
+		payload.write(&mut payload_len).expect("Failed to calculate length");
+		assert!(payload_len.0 + 32 < ONION_DATA_LEN);
+		len += payload_len.0 + 32;
+		if len > ONION_DATA_LEN {
+			return true;
+		}
 	}
+	false
 }
 
-const ZERO:[u8; 21*65] = [0; 21*65];
+/// panics if route_size_insane(payloads)
 pub(super) fn construct_onion_packet(payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], associated_data: &PaymentHash) -> msgs::OnionPacket {
-	let mut packet_data = [0; 20*65];
+	let mut packet_data = [0; ONION_DATA_LEN];
 
 	let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
-	chacha.process(&[0; 20*65], &mut packet_data);
+	chacha.process(&[0; ONION_DATA_LEN], &mut packet_data);
 
 	construct_onion_packet_with_init_noise(payloads, onion_keys, packet_data, associated_data)
 }
 
-fn construct_onion_packet_with_init_noise(mut payloads: Vec<msgs::OnionHopData>, onion_keys: Vec<OnionKeys>, mut packet_data: [u8; 20*65], associated_data: &PaymentHash) -> msgs::OnionPacket {
-	let mut buf = Vec::with_capacity(21*65);
-	buf.resize(21*65, 0);
+#[cfg(test)]
+// Used in testing to write bogus OnionHopDatas, which is otherwise not representable in
+// msgs::OnionHopData.
+pub(super) fn construct_onion_packet_bogus_hopdata<HD: Writeable>(payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, prng_seed: [u8; 32], associated_data: &PaymentHash) -> msgs::OnionPacket {
+	let mut packet_data = [0; ONION_DATA_LEN];
+
+	let mut chacha = ChaCha20::new(&prng_seed, &[0; 8]);
+	chacha.process(&[0; ONION_DATA_LEN], &mut packet_data);
+
+	construct_onion_packet_with_init_noise(payloads, onion_keys, packet_data, associated_data)
+}
+
+/// panics if route_size_insane(payloads)
+fn construct_onion_packet_with_init_noise<HD: Writeable>(mut payloads: Vec<HD>, onion_keys: Vec<OnionKeys>, mut packet_data: [u8; ONION_DATA_LEN], associated_data: &PaymentHash) -> msgs::OnionPacket {
 	let filler = {
-		let iters = payloads.len() - 1;
-		let end_len = iters * 65;
-		let mut res = Vec::with_capacity(end_len);
-		res.resize(end_len, 0);
+		const ONION_HOP_DATA_LEN: usize = 65; // We may decrease this eventually after TLV is common
+		let mut res = Vec::with_capacity(ONION_HOP_DATA_LEN * (payloads.len() - 1));
 
-		for (i, keys) in onion_keys.iter().enumerate() {
-			if i == payloads.len() - 1 { continue; }
+		let mut pos = 0;
+		for (i, (payload, keys)) in payloads.iter().zip(onion_keys.iter()).enumerate() {
+			if i == payloads.len() - 1 { break; }
+
 			let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
-			chacha.process(&ZERO, &mut buf); // We don't have a seek function :(
-			xor_bufs(&mut res[0..(i + 1)*65], &buf[(20 - i)*65..21*65]);
+			for _ in 0..(ONION_DATA_LEN - pos) { // TODO: Batch this.
+				let mut dummy = [0; 1];
+				chacha.process_in_place(&mut dummy); // We don't have a seek function :(
+			}
+
+			let mut payload_len = LengthCalculatingWriter(0);
+			payload.write(&mut payload_len).expect("Failed to calculate length");
+			pos += payload_len.0 + 32;
+			assert!(pos <= ONION_DATA_LEN);
+
+			res.resize(pos, 0u8);
+			chacha.process_in_place(&mut res);
 		}
 		res
 	};
 
 	let mut hmac_res = [0; 32];
 
 	for (i, (payload, keys)) in payloads.iter_mut().zip(onion_keys.iter()).rev().enumerate() {
-		shift_arr_right(&mut packet_data);
-		payload.hmac = hmac_res;
-		packet_data[0..65].copy_from_slice(&payload.encode()[..]);
+		let mut payload_len = LengthCalculatingWriter(0);
+		payload.write(&mut payload_len).expect("Failed to calculate length");
+		shift_arr_right(&mut packet_data, payload_len.0 + 32);
+		packet_data[0..payload_len.0].copy_from_slice(&payload.encode()[..]);
+		packet_data[payload_len.0..(payload_len.0 + 32)].copy_from_slice(&hmac_res);
 
 		let mut chacha = ChaCha20::new(&keys.rho, &[0u8; 8]);
-		chacha.process(&packet_data, &mut buf[0..20*65]);
-		packet_data[..].copy_from_slice(&buf[0..20*65]);
+		chacha.process_in_place(&mut packet_data);
 
 		if i == 0 {
-			packet_data[20*65 - filler.len()..20*65].copy_from_slice(&filler[..]);
+			packet_data[ONION_DATA_LEN - filler.len()..ONION_DATA_LEN].copy_from_slice(&filler[..]);
 		}
 
 		let mut hmac = HmacEngine::<Sha256>::new(&keys.mu);

@@ -212,7 +248,7 @@ fn construct_onion_packet_with_init_noise<HD: Writeable>(mut payloads: Vec<HD>,
 		hmac_res = Hmac::from_engine(hmac).into_inner();
 	}
 
-	msgs::OnionPacket{
+	msgs::OnionPacket {
 		version: 0,
 		public_key: Ok(onion_keys.first().unwrap().ephemeral_pubkey),
 		hop_data: packet_data,
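Worked size arithmetic behind ONION_DATA_LEN and route_size_insane: each hop consumes its serialized payload plus a 32-byte HMAC. A legacy payload is 1 (realm) + 8 (short_channel_id) + 8 (amt_to_forward) + 4 (outgoing_cltv_value) + 12 (zero padding) = 33 bytes, so a legacy hop costs exactly 65 bytes and 20 of them fill the 1300-byte onion:

fn main() {
    let legacy_payload = 1 + 8 + 8 + 4 + 12;
    assert_eq!(legacy_payload, 33);
    assert_eq!(legacy_payload + 32, 65); // payload + per-hop HMAC
    assert_eq!(20 * 65, 1300);           // ONION_DATA_LEN
}
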
@@ -428,7 +464,7 @@ mod tests {
 	use ln::features::{ChannelFeatures, NodeFeatures};
 	use ln::router::{Route, RouteHop};
 	use ln::msgs;
-	use util::ser::Writeable;
+	use util::ser::{Writeable, Writer};
 
 	use hex;
 

@@ -480,7 +516,7 @@ mod tests {
 
 	#[test]
 	fn onion_vectors() {
-		// Packet creation test vectors from BOLT 4
+		// Legacy packet creation test vectors from BOLT 4
 		let onion_keys = build_test_onion_keys();
 
 		assert_eq!(onion_keys[0].shared_secret[..], hex::decode("53eb63ea8a3fec3b3cd433b85cd62a4b145e1dda09391b348c4e1cd36a03ea66").unwrap()[..]);

@@ -516,54 +552,43 @@ mod tests {
 		// Test vectors below are flat-out wrong: they claim to set outgoing_cltv_value to non-0 :/
 		let payloads = vec!(
 			msgs::OnionHopData {
-				realm: 0,
-				data: msgs::OnionRealm0HopData {
+				format: msgs::OnionHopDataFormat::Legacy {
 					short_channel_id: 0,
-					amt_to_forward: 0,
-					outgoing_cltv_value: 0,
 				},
-				hmac: [0; 32],
+				amt_to_forward: 0,
+				outgoing_cltv_value: 0,
 			},
 			msgs::OnionHopData {
-				realm: 0,
-				data: msgs::OnionRealm0HopData {
+				format: msgs::OnionHopDataFormat::Legacy {
 					short_channel_id: 0x0101010101010101,
-					amt_to_forward: 0x0100000001,
-					outgoing_cltv_value: 0,
 				},
-				hmac: [0; 32],
+				amt_to_forward: 0x0100000001,
+				outgoing_cltv_value: 0,
 			},
 			msgs::OnionHopData {
-				realm: 0,
-				data: msgs::OnionRealm0HopData {
+				format: msgs::OnionHopDataFormat::Legacy {
 					short_channel_id: 0x0202020202020202,
-					amt_to_forward: 0x0200000002,
-					outgoing_cltv_value: 0,
 				},
-				hmac: [0; 32],
+				amt_to_forward: 0x0200000002,
+				outgoing_cltv_value: 0,
 			},
 			msgs::OnionHopData {
-				realm: 0,
-				data: msgs::OnionRealm0HopData {
+				format: msgs::OnionHopDataFormat::Legacy {
 					short_channel_id: 0x0303030303030303,
-					amt_to_forward: 0x0300000003,
-					outgoing_cltv_value: 0,
 				},
-				hmac: [0; 32],
+				amt_to_forward: 0x0300000003,
+				outgoing_cltv_value: 0,
 			},
 			msgs::OnionHopData {
-				realm: 0,
-				data: msgs::OnionRealm0HopData {
+				format: msgs::OnionHopDataFormat::Legacy {
 					short_channel_id: 0x0404040404040404,
-					amt_to_forward: 0x0400000004,
-					outgoing_cltv_value: 0,
 				},
-				hmac: [0; 32],
+				amt_to_forward: 0x0400000004,
+				outgoing_cltv_value: 0,
 			},
 		);
 
-		let packet = [0; 20*65];
-		let packet = super::construct_onion_packet_with_init_noise(payloads, onion_keys, packet, &PaymentHash([0x42; 32]));
+		let packet = super::construct_onion_packet_with_init_noise(payloads, onion_keys, [0; super::ONION_DATA_LEN], &PaymentHash([0x42; 32]));
 		// Just check the final packet encoding, as it includes all the per-hop vectors in it
 		// anyway...
 		assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a716a996c7845c93d90e4ecbb9bde4ece2f69425c99e4bc820e44485455f135edc0d10f7d61ab590531cf08000179a333a347f8b4072f216400406bdf3bf038659793d4a1fd7b246979e3150a0a4cb052c9ec69acf0f48c3d39cd55675fe717cb7d80ce721caad69320c3a469a202f1e468c67eaf7a7cd8226d0fd32f7b48084dca885d56047694762b67021713ca673929c163ec36e04e40ca8e1c6d17569419d3039d9a1ec866abe044a9ad635778b961fc0776dc832b3a451bd5d35072d2269cf9b040f6b7a7dad84fb114ed413b1426cb96ceaf83825665ed5a1d002c1687f92465b49ed4c7f0218ff8c6c7dd7221d589c65b3b9aaa71a41484b122846c7c7b57e02e679ea8469b70e14fe4f70fee4d87b910cf144be6fe48eef24da475c0b0bcc6565ae82cd3f4e3b24c76eaa5616c6111343306ab35c1fe5ca4a77c0e314ed7dba39d6f1e0de791719c241a939cc493bea2bae1c1e932679ea94d29084278513c77b899cc98059d06a27d171b0dbdf6bee13ddc4fc17a0c4d2827d488436b57baa167544138ca2e64a11b43ac8a06cd0c2fba2d4d900ed2d9205305e2d7383cc98dacb078133de5f6fb6bed2ef26ba92cea28aafc3b9948dd9ae5559e8bd6920b8cea462aa445ca6a95e0e7ba52961b181c79e73bd581821df2b10173727a810c92b83b5ba4a0403eb710d2ca10689a35bec6c3a708e9e92f7d78ff3c5d9989574b00c6736f84c199256e76e19e78f0c98a9d580b4a658c84fc8f2096c2fbea8f5f8c59d0fdacb3be2802ef802abbecb3aba4acaac69a0e965abd8981e9896b1f6ef9d60f7a164b371af869fd0e48073742825e9434fc54da837e120266d53302954843538ea7c6c3dbfb4ff3b2fdbe244437f2a153ccf7bdb4c92aa08102d4f3cff2ae5ef86fab4653595e6a5837fa2f3e29f27a9cde5966843fb847a4a61f1e76c281fe8bb2b0a181d096100db5a1a5ce7a910238251a43ca556712eaadea167fb4d7d75825e440f3ecd782036d7574df8bceacb397abefc5f5254d2722215c53ff54af8299aaaad642c6d72a14d27882d9bbd539e1cc7a527526ba89b8c037ad09120e98ab042d3e8652b31ae0e478516bfaf88efca9f3676ffe99d2819dcaeb7610a626695f53117665d267d3f7abebd6bbd6733f645c72c389f03855bdf1e4b8075b516569b118233a0f0971d24b83113c0b096f5216a207ca99a7cddc81c130923fe3d91e7508c9ac5f2e914ff5dccab9e558566fa14efb34ac98d878580814b94b73acbfde9072f30b881f7f0fff42d4045d1ace6322d86a97d164aa84d93a60498065cc7c20e636f5862dc81531a88c60305a2e59a985be327a6902e4bed986dbf4a0b50c217af0ea7fdf9ab37f9ea1a1aaa72f54cf40154ea9b269f1a7c09f9f43245109431a175d50e2db0132337baa0ef97eed0fcf20489da36b79a1172faccc2f7ded7c60e00694282d93359c4682135642bc81f433574aa8ef0c97b4ade7ca372c5ffc23c7eddd839bab4e0f14d6df15c9dbeab176bec8b5701cf054eb3072f6dadc98f88819042bf10c407516ee58bce33fbe3b3d86a54255e577db4598e30a135361528c101683a5fcde7e8ba53f3456254be8f45fe3a56120ae96ea3773631fcb3873aa3abd91bcff00bd38bd43697a2e789e00da6077482e7b1b1a677b5afae4c54e6cbdf7377b694eb7d7a5b913476a5be923322d3de06060fd5e819635232a2cf4f0731da13b8546d1d6d4f8d75b9fce6c2341a71b0ea6f780df54bfdb0dd5cd9855179f602f9172307c7268724c3618e6817abd793adc214a0dc0bc616816632f27ea336fb56dfd").unwrap());

@@ -592,4 +617,59 @@ mod tests {
 		let onion_packet_5 = super::encrypt_failure_packet(&onion_keys[0].shared_secret[..], &onion_packet_4.data[..]);
 		assert_eq!(onion_packet_5.data, hex::decode("9c5add3963fc7f6ed7f148623c84134b5647e1306419dbe2174e523fa9e2fbed3a06a19f899145610741c83ad40b7712aefaddec8c6baf7325d92ea4ca4d1df8bce517f7e54554608bf2bd8071a4f52a7a2f7ffbb1413edad81eeea5785aa9d990f2865dc23b4bc3c301a94eec4eabebca66be5cf638f693ec256aec514620cc28ee4a94bd9565bc4d4962b9d3641d4278fb319ed2b84de5b665f307a2db0f7fbb757366067d88c50f7e829138fde4f78d39b5b5802f1b92a8a820865af5cc79f9f30bc3f461c66af95d13e5e1f0381c184572a91dee1c849048a647a1158cf884064deddbf1b0b88dfe2f791428d0ba0f6fb2f04e14081f69165ae66d9297c118f0907705c9c4954a199bae0bb96fad763d690e7daa6cfda59ba7f2c8d11448b604d12d").unwrap());
 	}
+
+	struct RawOnionHopData {
+		data: Vec<u8>
+	}
+	impl RawOnionHopData {
+		fn new(orig: msgs::OnionHopData) -> Self {
+			Self { data: orig.encode() }
+		}
+	}
+	impl Writeable for RawOnionHopData {
+		fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+			writer.write_all(&self.data[..])
+		}
+	}
+
+	#[test]
+	fn variable_length_onion_vectors() {
+		// Packet creation test vectors from BOLT 4 (as of this writing at
+		// bolt04/onion-test-multi-frame.json in the spec repo).
+		// Note that we use the RawOnionHopData for everything except Legacy hops, as even the hops
+		// with "type": "tlv" are not valid TLV (they were for a previous version of TLV that
+		// didn't move forward), and, thus, cannot be directly represented in our in-memory enums.
+		let onion_keys = build_test_onion_keys();
+
+		let payloads = vec!(
+			RawOnionHopData::new(msgs::OnionHopData {
+				format: msgs::OnionHopDataFormat::Legacy {
+					short_channel_id: 0,
+				},
+				amt_to_forward: 0,
+				outgoing_cltv_value: 0,
+			}),
+			RawOnionHopData {
+				data: hex::decode("140101010101010101000000000000000100000001").unwrap(),
+			},
+			RawOnionHopData {
+				data: hex::decode("fd0100000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff").unwrap(),
+			},
+			RawOnionHopData {
+				data: hex::decode("140303030303030303000000000000000300000003").unwrap(),
+			},
+			RawOnionHopData::new(msgs::OnionHopData {
+				format: msgs::OnionHopDataFormat::Legacy {
+					short_channel_id: 0x0404040404040404,
+				},
+				amt_to_forward: 4,
+				outgoing_cltv_value: 4,
+			}),
+		);
+
+		let packet = super::construct_onion_packet_with_init_noise(payloads, onion_keys, [0; super::ONION_DATA_LEN], &PaymentHash([0x42; 32]));
+		// Just check the final packet encoding, as it includes all the per-hop vectors in it
+		// anyway...
+		assert_eq!(packet.encode(), hex::decode("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619e5f14350c2a76fc232b5e46d421e9615471ab9e0bc887beff8c95fdb878f7b3a71a060daf367132b378b3a3883c0e2c0e026b8900b2b5cdbc784e1a3bb913f88a9c50f7d61ab590531cf08000178a333a347f8b4072ed056f820f77774345e183a342ec4729f3d84accf515e88adddb85ecc08daba68404bae9a8e8d7178977d7094a1ae549f89338c0777551f874159eb42d3a59fb9285ad4e24883f27de23942ec966611e99bee1cee503455be9e8e642cef6cef7b9864130f692283f8a973d47a8f1c1726b6e59969385975c766e35737c8d76388b64f748ee7943ffb0e2ee45c57a1abc40762ae598723d21bd184e2b338f68ebff47219357bd19cd7e01e2337b806ef4d717888e129e59cd3dc31e6201ccb2fd6d7499836f37a993262468bcb3a4dcd03a22818aca49c6b7b9b8e9e870045631d8e039b066ff86e0d1b7291f71cefa7264c70404a8e538b566c17ccc5feab231401e6c08a01bd5edfc1aa8e3e533b96e82d1f91118d508924b923531929aea889fcdf050597c681185f336b1da63b0939aa2b7c50b21b5eb7b6ad66c81fab98a3cdf73f658149e7e9ced4edde5d38c9b8f92e16f6b4ab13d7fca6a0e4ecc9f9de611a90da6e99c39551094c56e3196f282c5dffd9fc4b2fc12f3bca8e6fe47eb45fbdd3be21a8a8d200797eae3c9a0497132f92410d804977408494dff49dd3d8bce248e0b74fd9e6f0f7102c25ddfa02bd9ad9f746abbfa337ef811d5345a9e16b60de1767b209645ba40bd1f9a5f75bc04feca9b27c5554be4fe83fac2cb83aa447a817bb85ae966c68b420063833fada375e2f515965e687a45699632902672c654d1d18d7bcbf55e8fa57f63f2da449f8e1e606e8722df081e5f193fc4179feb99ad22819afdeef211f7c54afdba92aeef0c00b7bc2b65a4813c01f907a8377585708f2d4c940a25328e585714c8ded0a9a4d7a6de1027c1cb7a0198cd3db68b58c0704dfd0cfbe624e9cd18cc0ae5d96697bb476708b9ee0403d211e64e0d5a7683a7a9a140c02f0ff1c6e67a302941b4052bdea8a63e70a3ad62c5b89c698f1fd3c7685cb49705096cad702d02d93bcb1c27a409f4c9bddec001205ca4a2740f19b50900be81c7e847f1a863deea8d35701f1355cad8db57b1d4eb2ab4e29587734785abfb46ddede71928213d7d089dfdeda052827f459f1688cc0935bd47e7bcec27427c8376dcce7e22699567c0d145f8a7db33f6758815f1f15f9f7a9760dec4f34ae095edda4c64e9735bdd029c4e32c2ee31ba47ec5e6bdb97813d52dbd15b4e0b7a2c7f790ae64104d99f38c127f0a093288fa34144adb16b8968d4fa7656fcec99de8503dd46d3b03620a71c7cd085364abd30dccf7fbda25a1cdc102600149c9af1c97aa0372cd2e1909f28ac5c686f432b310e79528c9b8b9e8f314c1e74621ce6308ad2278b81d460892e0d9dd38b7c76d58be6dfd10ae7583ee1e7ef5b3f6f78dc60af0950df1b00cc55b6d178ba2e476bea0eaeef49323b83f05804159e7aef4eed4cc60dd07be76f067dfd0bcfb0b806b69ba921336a20c43c832d0cab8fa3ddeb29e3bf07b0d98a112eb07802756235a49d44a8b82a950d84e95e01971f0e106ccb337f07384e21620e0ad39e16ed9edca123226cf55ac44f449eeb53e38a7f27d101806e4823e4efcc887414240ee6826c4a5cb1c6443ad36ebf905a435c1d9054e54173911b17b5b40f60b3d9fd5f12eac54ca1e20191f5f18544d5fd3d665e9bcef96fb44b76110aa64d9db4c86c9513cbdad546538e8aec521fbe83ceac5e74a15629f1ed0b870a1d0d1e5680b6d6100d1bd3f3b9043bd35b8919c4088f1949b8be89e4701eb870f8ed64fafa446c78df3ea").unwrap());
+	}
+}

@@ -12,13 +12,13 @@ use ln::features::InitFeatures;
 use ln::msgs;
 use ln::msgs::ChannelMessageHandler;
 use ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
+use util::ser::VecWriter;
 use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
 use ln::wire;
 use ln::wire::Encode;
 use util::byte_utils;
 use util::events::{MessageSendEvent, MessageSendEventsProvider};
 use util::logger::Logger;
-use util::ser::Writer;
 
 use std::collections::{HashMap,hash_map,HashSet,LinkedList};
 use std::sync::{Arc, Mutex};

@@ -192,17 +192,6 @@ pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref> where CM::Target
 	logger: Arc<Logger>,
 }
 
-struct VecWriter(Vec<u8>);
-impl Writer for VecWriter {
-	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
-		self.0.extend_from_slice(buf);
-		Ok(())
-	}
-	fn size_hint(&mut self, size: usize) {
-		self.0.reserve_exact(size);
-	}
-}
-
 macro_rules! encode_msg {
 	($msg: expr) => {{
 		let mut buffer = VecWriter(Vec::new());

@@ -9,6 +9,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+use std::io;
+
 #[cfg(not(feature = "fuzztarget"))]
 mod real_chacha {
 	use std::cmp;

@@ -54,6 +56,8 @@ mod real_chacha {
 		}
 	}
 
+	const BLOCK_SIZE: usize = 64;
+
 	#[derive(Clone,Copy)]
 	struct ChaChaState {
 		a: u32x4,

@@ -65,7 +69,7 @@ mod real_chacha {
 	#[derive(Copy)]
 	pub struct ChaCha20 {
 		state  : ChaChaState,
-		output : [u8; 64],
+		output : [u8; BLOCK_SIZE],
 		offset : usize,
 	}
 

@@ -133,7 +137,7 @@ mod real_chacha {
 		assert!(key.len() == 16 || key.len() == 32);
 		assert!(nonce.len() == 8 || nonce.len() == 12);
 
-		ChaCha20{ state: ChaCha20::expand(key, nonce), output: [0u8; 64], offset: 64 }
+		ChaCha20{ state: ChaCha20::expand(key, nonce), output: [0u8; BLOCK_SIZE], offset: 64 }
 	}
 
 	fn expand(key: &[u8], nonce: &[u8]) -> ChaChaState {

@@ -195,7 +199,7 @@ mod real_chacha {
 		}
 	}
 
-	// put the the next 64 keystream bytes into self.output
+	// put the next BLOCK_SIZE keystream bytes into self.output
 	fn update(&mut self) {
 		let mut state = self.state;
 

@@ -232,12 +236,12 @@ mod real_chacha {
 		while i < len {
 			// If there is no keystream available in the output buffer,
 			// generate the next block.
-			if self.offset == 64 {
+			if self.offset == BLOCK_SIZE {
 				self.update();
 			}
 
 			// Process the min(available keystream, remaining input length).
-			let count = cmp::min(64 - self.offset, len - i);
+			let count = cmp::min(BLOCK_SIZE - self.offset, len - i);
 			// explicitly assert lengths to avoid bounds checks:
 			assert!(output.len() >= i + count);
 			assert!(input.len() >= i + count);

@@ -249,6 +253,29 @@ mod real_chacha {
 			self.offset += count;
 		}
 	}
+
+	pub fn process_in_place(&mut self, input_output: &mut [u8]) {
+		let len = input_output.len();
+		let mut i = 0;
+		while i < len {
+			// If there is no keystream available in the output buffer,
+			// generate the next block.
+			if self.offset == BLOCK_SIZE {
+				self.update();
+			}
+
+			// Process the min(available keystream, remaining input length).
+			let count = cmp::min(BLOCK_SIZE - self.offset, len - i);
+			// explicitly assert lengths to avoid bounds checks:
+			assert!(input_output.len() >= i + count);
+			assert!(self.output.len() >= self.offset + count);
+			for j in 0..count {
+				input_output[i + j] ^= self.output[self.offset + j];
+			}
+			i += count;
+			self.offset += count;
+		}
+	}
 }
 }
 #[cfg(not(feature = "fuzztarget"))]

@@ -268,11 +295,27 @@ mod fuzzy_chacha {
 	pub fn process(&mut self, input: &[u8], output: &mut [u8]) {
 		output.copy_from_slice(input);
 	}
+
+	pub fn process_in_place(&mut self, _input_output: &mut [u8]) {}
 }
 }
 #[cfg(feature = "fuzztarget")]
 pub use self::fuzzy_chacha::ChaCha20;
 
+pub(crate) struct ChaChaReader<'a, R: io::Read> {
+	pub chacha: &'a mut ChaCha20,
+	pub read: R,
+}
+impl<'a, R: io::Read> io::Read for ChaChaReader<'a, R> {
+	fn read(&mut self, dest: &mut [u8]) -> Result<usize, io::Error> {
+		let res = self.read.read(dest)?;
+		if res > 0 {
+			self.chacha.process_in_place(&mut dest[0..res]);
+		}
+		Ok(res)
+	}
+}
+
 #[cfg(test)]
 mod test {
 	use std::iter::repeat;

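A self-contained toy version of the ChaChaReader idea above — the real one XORs with a ChaCha20 keystream, while a fixed repeating pad stands in here purely to show the wrapping shape: any io::Read can be wrapped so that bytes are decrypted as they are pulled, letting a deserializer consume exactly as many encrypted bytes as a variable-length payload needs.

use std::io::{self, Cursor, Read};

struct XorReader<R: Read> {
    pad: u8, // stand-in for the cipher keystream
    read: R,
}

impl<R: Read> Read for XorReader<R> {
    fn read(&mut self, dest: &mut [u8]) -> io::Result<usize> {
        let res = self.read.read(dest)?;
        for b in &mut dest[0..res] {
            *b ^= self.pad; // real code: chacha.process_in_place(&mut dest[0..res])
        }
        Ok(res)
    }
}

fn main() -> io::Result<()> {
    let ciphertext = vec![0x41 ^ 0x5a, 0x42 ^ 0x5a];
    let mut r = XorReader { pad: 0x5a, read: Cursor::new(ciphertext) };
    let mut out = Vec::new();
    r.read_to_end(&mut out)?;
    assert_eq!(out, b"AB");
    Ok(())
}
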
@@ -6,6 +6,7 @@ use std::io::{Read, Write};
 use std::collections::HashMap;
 use std::hash::Hash;
 use std::sync::Mutex;
+use std::cmp;
 
 use secp256k1::Signature;
 use secp256k1::key::{PublicKey, SecretKey};

@@ -56,7 +57,7 @@ impl<'a, W: Writer + 'a> Write for WriterWriteAdaptor<'a, W> {
 	}
 }
 
-struct VecWriter(Vec<u8>);
+pub(crate) struct VecWriter(pub Vec<u8>);
 impl Writer for VecWriter {
 	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
 		self.0.extend_from_slice(buf);

@@ -67,6 +68,85 @@ impl Writer for VecWriter {
 	}
 }
 
+/// Writer that only tracks the amount of data written - useful if you need to calculate the
+/// length of some data when serialized but don't yet need the full data.
+pub(crate) struct LengthCalculatingWriter(pub usize);
+impl Writer for LengthCalculatingWriter {
+	#[inline]
+	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+		self.0 += buf.len();
+		Ok(())
+	}
+	#[inline]
+	fn size_hint(&mut self, _size: usize) {}
+}
+
+/// Essentially std::io::Take but a bit simpler and with a method to walk the underlying stream
+/// forward to ensure we always consume exactly the fixed length specified.
+pub(crate) struct FixedLengthReader<R: Read> {
+	read: R,
+	bytes_read: u64,
+	total_bytes: u64,
+}
+impl<R: Read> FixedLengthReader<R> {
+	pub fn new(read: R, total_bytes: u64) -> Self {
+		Self { read, bytes_read: 0, total_bytes }
+	}
+
+	pub fn bytes_remain(&mut self) -> bool {
+		self.bytes_read != self.total_bytes
+	}
+
+	pub fn eat_remaining(&mut self) -> Result<(), DecodeError> {
+		::std::io::copy(self, &mut ::std::io::sink()).unwrap();
+		if self.bytes_read != self.total_bytes {
+			Err(DecodeError::ShortRead)
+		} else {
+			Ok(())
+		}
+	}
+}
+impl<R: Read> Read for FixedLengthReader<R> {
+	fn read(&mut self, dest: &mut [u8]) -> Result<usize, ::std::io::Error> {
+		if self.total_bytes == self.bytes_read {
+			Ok(0)
+		} else {
+			let read_len = cmp::min(dest.len() as u64, self.total_bytes - self.bytes_read);
+			match self.read.read(&mut dest[0..(read_len as usize)]) {
+				Ok(v) => {
+					self.bytes_read += v as u64;
+					Ok(v)
+				},
+				Err(e) => Err(e),
+			}
+		}
+	}
+}
+
+/// A Read which tracks whether any bytes have been read at all. This allows us to distinguish
+/// between "EOF reached before we started" and "EOF reached mid-read".
+pub(crate) struct ReadTrackingReader<R: Read> {
+	read: R,
+	pub have_read: bool,
+}
+impl<R: Read> ReadTrackingReader<R> {
+	pub fn new(read: R) -> Self {
+		Self { read, have_read: false }
+	}
+}
+impl<R: Read> Read for ReadTrackingReader<R> {
+	fn read(&mut self, dest: &mut [u8]) -> Result<usize, ::std::io::Error> {
+		match self.read.read(dest) {
+			Ok(0) => Ok(0),
+			Ok(len) => {
+				self.have_read = true;
+				Ok(len)
+			},
+			Err(e) => Err(e),
+		}
+	}
+}
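An illustrative use of the FixedLengthReader semantics above, approximated here with std's own Take (which mirrors the capping behavior): reads are limited to total_bytes, and draining the remainder leaves the outer stream positioned at the end of the length-prefixed record.

use std::io::{Cursor, Read};

fn main() -> std::io::Result<()> {
    let data = [1u8, 2, 3, 4, 5];
    let mut cursor = Cursor::new(&data[..]);
    let mut capped = cursor.by_ref().take(3);
    let mut buf = Vec::new();
    capped.read_to_end(&mut buf)?; // also what eat_remaining() effectively does
    assert_eq!(buf, [1, 2, 3]);    // only 3 bytes visible through the wrapper
    assert_eq!(cursor.position(), 3); // outer stream sits right after the record
    Ok(())
}
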
 
 /// A trait that various rust-lightning types implement allowing them to be written out to a Writer
 pub trait Writeable {
 	/// Writes self out to the given Writer

@@ -125,6 +205,76 @@ impl<R: Read> Readable<R> for U48 {
 	}
 }
 
+/// Lightning TLV uses a custom variable-length integer called BigSize. It is similar to Bitcoin's
+/// variable-length integers except that it is serialized in big-endian instead of little-endian.
+///
+/// Like Bitcoin's variable-length integer, it exhibits ambiguity in that certain values can be
+/// encoded in several different ways, which we must check for at deserialization-time. Thus, if
+/// you're looking for an example of a variable-length integer to use for your own project, move
+/// along, this is a rather poor design.
+pub(crate) struct BigSize(pub u64);
+impl Writeable for BigSize {
+	#[inline]
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+		match self.0 {
+			0...0xFC => {
+				(self.0 as u8).write(writer)
+			},
+			0xFD...0xFFFF => {
+				0xFDu8.write(writer)?;
+				(self.0 as u16).write(writer)
+			},
+			0x10000...0xFFFFFFFF => {
+				0xFEu8.write(writer)?;
+				(self.0 as u32).write(writer)
+			},
+			_ => {
+				0xFFu8.write(writer)?;
+				(self.0 as u64).write(writer)
+			},
+		}
+	}
+}
+impl<R: Read> Readable<R> for BigSize {
+	#[inline]
+	fn read(reader: &mut R) -> Result<BigSize, DecodeError> {
+		let n: u8 = Readable::read(reader)?;
+		match n {
+			0xFF => {
+				let x: u64 = Readable::read(reader)?;
+				if x < 0x100000000 {
+					Err(DecodeError::InvalidValue)
+				} else {
+					Ok(BigSize(x))
+				}
+			}
+			0xFE => {
+				let x: u32 = Readable::read(reader)?;
+				if x < 0x10000 {
+					Err(DecodeError::InvalidValue)
+				} else {
+					Ok(BigSize(x as u64))
+				}
+			}
+			0xFD => {
+				let x: u16 = Readable::read(reader)?;
+				if x < 0xFD {
+					Err(DecodeError::InvalidValue)
+				} else {
+					Ok(BigSize(x as u64))
+				}
+			}
+			n => Ok(BigSize(n as u64))
+		}
+	}
+}
|
||||
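
// Illustrative sketch (not part of this commit): 550 (0x226) takes the
// two-byte form behind an 0xFD marker, while the non-minimal encoding of 0xFC
// in that same form is rejected at read time.
//
//	let mut w = VecWriter(Vec::new());
//	BigSize(550).write(&mut w)?;
//	assert_eq!(w.0, vec![0xFD, 0x02, 0x26]);
//	let mut r = ::std::io::Cursor::new(vec![0xFDu8, 0x00, 0xFC]);
//	let res: Result<BigSize, _> = Readable::read(&mut r);
//	assert!(res.is_err()); // DecodeError::InvalidValue: 0xFC fits in one byte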

/// In TLV we occasionally send fields which only consist of, or potentially end with, a
/// variable-length integer which is simply truncated by skipping high zero bytes. This type
/// encapsulates such integers implementing Readable/Writeable for them.
#[cfg_attr(test, derive(PartialEq, Debug))]
pub(crate) struct HighZeroBytesDroppedVarInt<T>(pub T);

macro_rules! impl_writeable_primitive {
	($val_type:ty, $meth_write:ident, $len: expr, $meth_read:ident) => {
		impl Writeable for $val_type {
@@ -133,6 +283,13 @@ macro_rules! impl_writeable_primitive {
				writer.write_all(&$meth_write(*self))
			}
		}
		impl Writeable for HighZeroBytesDroppedVarInt<$val_type> {
			#[inline]
			fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
				// Skip any full leading 0 bytes when writing (in BE):
				writer.write_all(&$meth_write(self.0)[(self.0.leading_zeros()/8) as usize..$len])
			}
		}
		impl<R: Read> Readable<R> for $val_type {
			#[inline]
			fn read(reader: &mut R) -> Result<$val_type, DecodeError> {
@@ -141,6 +298,30 @@ macro_rules! impl_writeable_primitive {
				Ok($meth_read(&buf))
			}
		}
		impl<R: Read> Readable<R> for HighZeroBytesDroppedVarInt<$val_type> {
			#[inline]
			fn read(reader: &mut R) -> Result<HighZeroBytesDroppedVarInt<$val_type>, DecodeError> {
				// We need to accept short reads (read_len == 0) as "EOF" and handle them as simply
				// the high bytes being dropped. To do so, we start reading into the middle of buf
				// and then convert the appropriate number of bytes with extra high bytes out of
				// buf.
				let mut buf = [0; $len*2];
				let mut read_len = reader.read(&mut buf[$len..])?;
				let mut total_read_len = read_len;
				while read_len != 0 && total_read_len != $len {
					read_len = reader.read(&mut buf[($len + total_read_len)..])?;
					total_read_len += read_len;
				}
				if total_read_len == 0 || buf[$len] != 0 {
					let first_byte = $len - ($len - total_read_len);
					Ok(HighZeroBytesDroppedVarInt($meth_read(&buf[first_byte..first_byte + $len])))
				} else {
					// If the encoding had extra zero bytes, return a failure even though we know
					// what they meant (as the TLV test vectors require this)
					Err(DecodeError::InvalidValue)
				}
			}
		}
	}
}
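
// Illustrative sketch (not part of this commit): 256u64 drops its six high
// zero bytes on the wire and regains them on read.
//
//	let mut w = VecWriter(Vec::new());
//	HighZeroBytesDroppedVarInt(256u64).write(&mut w)?;
//	assert_eq!(w.0, vec![0x01, 0x00]); // two bytes instead of eight
//	let mut r = ::std::io::Cursor::new(w.0);
//	let v: HighZeroBytesDroppedVarInt<u64> = Readable::read(&mut r)?;
//	assert_eq!(v.0, 256);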

@@ -1,3 +1,109 @@
macro_rules! encode_tlv {
	($stream: expr, {$(($type: expr, $field: expr)),*}) => { {
		use util::ser::{BigSize, LengthCalculatingWriter};
		$(
			BigSize($type).write($stream)?;
			let mut len_calc = LengthCalculatingWriter(0);
			$field.write(&mut len_calc)?;
			BigSize(len_calc.0 as u64).write($stream)?;
			$field.write($stream)?;
		)*
	} }
}
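
// Illustrative sketch (not part of this commit): each record comes out as
// BigSize(type) || BigSize(length) || value. For the pair (0xff, 0xabcdu16)
// that is "fd00ff" (type, two-byte BigSize form), "02" (length) and "abcd"
// (the value), matching the third case in do_simple_test_tlv_write below.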

macro_rules! encode_varint_length_prefixed_tlv {
	($stream: expr, {$(($type: expr, $field: expr)),*}) => { {
		use util::ser::{BigSize, LengthCalculatingWriter};
		let mut len = LengthCalculatingWriter(0);
		{
			$(
				BigSize($type).write(&mut len)?;
				let mut field_len = LengthCalculatingWriter(0);
				$field.write(&mut field_len)?;
				BigSize(field_len.0 as u64).write(&mut len)?;
				len.0 += field_len.0;
			)*
		}

		BigSize(len.0 as u64).write($stream)?;
		encode_tlv!($stream, {
			$(($type, $field)),*
		});
	} }
}
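
// Illustrative sketch (not part of this commit): the length-prefixed variant
// first totals the records with a LengthCalculatingWriter, so
// encode_varint_length_prefixed_tlv!(&mut stream, { (1, 1u8) }) emits
// "03" (total length) || "01" (type) || "01" (length) || "01" (value),
// i.e. the hex stream "03010101" asserted in the tests below.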

macro_rules! decode_tlv {
	($stream: expr, {$(($reqtype: expr, $reqfield: ident)),*}, {$(($type: expr, $field: ident)),*}) => { {
		use ln::msgs::DecodeError;
		let mut last_seen_type: Option<u64> = None;
		'tlv_read: loop {
			use util::ser;

			// First decode the type of this TLV:
			let typ: ser::BigSize = {
				// We track whether any bytes were read during the Readable::read call to
				// determine whether we should break or return ShortRead if we get an
				// UnexpectedEof. This should in every case be largely cosmetic, but it's nice to
				// pass the TLV test vectors exactly, which require this distinction.
				let mut tracking_reader = ser::ReadTrackingReader::new($stream);
				match ser::Readable::read(&mut tracking_reader) {
					Err(DecodeError::ShortRead) => {
						if !tracking_reader.have_read {
							break 'tlv_read
						} else {
							Err(DecodeError::ShortRead)?
						}
					},
					Err(e) => Err(e)?,
					Ok(t) => t,
				}
			};

			// Types must be unique and monotonically increasing:
			match last_seen_type {
				Some(t) if typ.0 <= t => {
					Err(DecodeError::InvalidValue)?
				},
				_ => {},
			}
			// As we read types, make sure we hit every required type:
			$(if (last_seen_type.is_none() || last_seen_type.unwrap() < $reqtype) && typ.0 > $reqtype {
				Err(DecodeError::InvalidValue)?
			})*
			last_seen_type = Some(typ.0);

			// Finally, read the length and value itself:
			let length: ser::BigSize = Readable::read($stream)?;
			let mut s = ser::FixedLengthReader::new($stream, length.0);
			match typ.0 {
				$($reqtype => {
					$reqfield = ser::Readable::read(&mut s)?;
					if s.bytes_remain() {
						s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
						Err(DecodeError::InvalidValue)?
					}
				},)*
				$($type => {
					$field = Some(ser::Readable::read(&mut s)?);
					if s.bytes_remain() {
						s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
						Err(DecodeError::InvalidValue)?
					}
				},)*
				x if x % 2 == 0 => {
					Err(DecodeError::UnknownRequiredFeature)?
				},
				_ => {},
			}
			s.eat_remaining()?;
		}
		// Make sure we got to each required type after we've read every TLV:
		$(if last_seen_type.is_none() || last_seen_type.unwrap() < $reqtype {
			Err(DecodeError::InvalidValue)?
		})*
	} }
}
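
// Illustrative sketch (not part of this commit): the `x if x % 2 == 0` arm
// implements BOLT 1's "it's OK to be odd" rule. Once the required types have
// been seen, an unknown *odd* record such as "0500" is drained by
// eat_remaining() and ignored, while an unknown *even* record such as "0600"
// aborts the read with UnknownRequiredFeature, as the tests below exercise.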

macro_rules! impl_writeable {
	($st:ident, $len: expr, {$($field:ident),*}) => {
		impl ::util::ser::Writeable for $st {
@@ -40,3 +146,224 @@ macro_rules! impl_writeable_len_match {
		}
	}
}

#[cfg(test)]
mod tests {
	use std::io::{Cursor, Read};
	use ln::msgs::DecodeError;
	use util::ser::{Readable, Writeable, HighZeroBytesDroppedVarInt, VecWriter};
	use secp256k1::PublicKey;

	// The BOLT TLV test cases don't include any tests which use our "required-value" logic since
	// the encoding layer in the BOLTs has no such concept, though it makes our macros easier to
	// work with so they're baked into the decoder. Thus, we have a few additional tests below.
	fn tlv_reader(s: &[u8]) -> Result<(u64, u32, Option<u32>), DecodeError> {
		let mut s = Cursor::new(s);
		let mut a: u64 = 0;
		let mut b: u32 = 0;
		let mut c: Option<u32> = None;
		decode_tlv!(&mut s, {(2, a), (3, b)}, {(4, c)});
		Ok((a, b, c))
	}

	#[test]
	fn tlv_v_short_read() {
		// We only expect a u32 for type 3 (which we are given), but the L says it's 8 bytes.
		if let Err(DecodeError::ShortRead) = tlv_reader(&::hex::decode(
				concat!("0100", "0208deadbeef1badbeef", "0308deadbeef")
				).unwrap()[..]) {
		} else { panic!(); }
	}

	#[test]
	fn tlv_types_out_of_order() {
		if let Err(DecodeError::InvalidValue) = tlv_reader(&::hex::decode(
				concat!("0100", "0304deadbeef", "0208deadbeef1badbeef")
				).unwrap()[..]) {
		} else { panic!(); }
		// ...even if it's some field we don't understand
		if let Err(DecodeError::InvalidValue) = tlv_reader(&::hex::decode(
				concat!("0208deadbeef1badbeef", "0100", "0304deadbeef")
				).unwrap()[..]) {
		} else { panic!(); }
	}

	#[test]
	fn tlv_req_type_missing_or_extra() {
		// It's also bad if they included even fields we don't understand
		if let Err(DecodeError::UnknownRequiredFeature) = tlv_reader(&::hex::decode(
				concat!("0100", "0208deadbeef1badbeef", "0304deadbeef", "0600")
				).unwrap()[..]) {
		} else { panic!(); }
		// ... or if they're missing fields we need
		if let Err(DecodeError::InvalidValue) = tlv_reader(&::hex::decode(
				concat!("0100", "0208deadbeef1badbeef")
				).unwrap()[..]) {
		} else { panic!(); }
		// ... even if that field is even
		if let Err(DecodeError::InvalidValue) = tlv_reader(&::hex::decode(
				concat!("0304deadbeef", "0500")
				).unwrap()[..]) {
		} else { panic!(); }
	}

	#[test]
	fn tlv_simple_good_cases() {
		assert_eq!(tlv_reader(&::hex::decode(
				concat!("0208deadbeef1badbeef", "03041bad1dea")
				).unwrap()[..]).unwrap(),
			(0xdeadbeef1badbeef, 0x1bad1dea, None));
		assert_eq!(tlv_reader(&::hex::decode(
				concat!("0208deadbeef1badbeef", "03041bad1dea", "040401020304")
				).unwrap()[..]).unwrap(),
			(0xdeadbeef1badbeef, 0x1bad1dea, Some(0x01020304)));
	}

	impl<R: Read> Readable<R> for (PublicKey, u64, u64) {
		#[inline]
		fn read(reader: &mut R) -> Result<(PublicKey, u64, u64), DecodeError> {
			Ok((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?))
		}
	}

	// BOLT TLV test cases
	fn tlv_reader_n1(s: &[u8]) -> Result<(Option<HighZeroBytesDroppedVarInt<u64>>, Option<u64>, Option<(PublicKey, u64, u64)>, Option<u16>), DecodeError> {
		let mut s = Cursor::new(s);
		let mut tlv1: Option<HighZeroBytesDroppedVarInt<u64>> = None;
		let mut tlv2: Option<u64> = None;
		let mut tlv3: Option<(PublicKey, u64, u64)> = None;
		let mut tlv4: Option<u16> = None;
		decode_tlv!(&mut s, {}, {(1, tlv1), (2, tlv2), (3, tlv3), (254, tlv4)});
		Ok((tlv1, tlv2, tlv3, tlv4))
	}

	#[test]
	fn bolt_tlv_bogus_stream() {
		macro_rules! do_test {
			($stream: expr, $reason: ident) => {
				if let Err(DecodeError::$reason) = tlv_reader_n1(&::hex::decode($stream).unwrap()[..]) {
				} else { panic!(); }
			}
		}

		// TLVs from the BOLT test cases which should not decode as either n1 or n2
		do_test!(concat!("fd01"), ShortRead);
		do_test!(concat!("fd0001", "00"), InvalidValue);
		do_test!(concat!("fd0101"), ShortRead);
		do_test!(concat!("0f", "fd"), ShortRead);
		do_test!(concat!("0f", "fd26"), ShortRead);
		do_test!(concat!("0f", "fd2602"), ShortRead);
		do_test!(concat!("0f", "fd0001", "00"), InvalidValue);
		do_test!(concat!("0f", "fd0201", "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), ShortRead);

		do_test!(concat!("12", "00"), UnknownRequiredFeature);
		do_test!(concat!("fd0102", "00"), UnknownRequiredFeature);
		do_test!(concat!("fe01000002", "00"), UnknownRequiredFeature);
		do_test!(concat!("ff0100000000000002", "00"), UnknownRequiredFeature);
	}

	#[test]
	fn bolt_tlv_bogus_n1_stream() {
		macro_rules! do_test {
			($stream: expr, $reason: ident) => {
				if let Err(DecodeError::$reason) = tlv_reader_n1(&::hex::decode($stream).unwrap()[..]) {
				} else { panic!(); }
			}
		}

		// TLVs from the BOLT test cases which should not decode as n1
		do_test!(concat!("01", "09", "ffffffffffffffffff"), InvalidValue);
		do_test!(concat!("01", "01", "00"), InvalidValue);
		do_test!(concat!("01", "02", "0001"), InvalidValue);
		do_test!(concat!("01", "03", "000100"), InvalidValue);
		do_test!(concat!("01", "04", "00010000"), InvalidValue);
		do_test!(concat!("01", "05", "0001000000"), InvalidValue);
		do_test!(concat!("01", "06", "000100000000"), InvalidValue);
		do_test!(concat!("01", "07", "00010000000000"), InvalidValue);
		do_test!(concat!("01", "08", "0001000000000000"), InvalidValue);
		do_test!(concat!("02", "07", "01010101010101"), ShortRead);
		do_test!(concat!("02", "09", "010101010101010101"), InvalidValue);
		do_test!(concat!("03", "21", "023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb"), ShortRead);
		do_test!(concat!("03", "29", "023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb0000000000000001"), ShortRead);
		do_test!(concat!("03", "30", "023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb000000000000000100000000000001"), ShortRead);
		do_test!(concat!("03", "31", "043da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb00000000000000010000000000000002"), InvalidValue);
		do_test!(concat!("03", "32", "023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb0000000000000001000000000000000001"), InvalidValue);
		do_test!(concat!("fd00fe", "00"), ShortRead);
		do_test!(concat!("fd00fe", "01", "01"), ShortRead);
		do_test!(concat!("fd00fe", "03", "010101"), InvalidValue);
		do_test!(concat!("00", "00"), UnknownRequiredFeature);

		do_test!(concat!("02", "08", "0000000000000226", "01", "01", "2a"), InvalidValue);
		do_test!(concat!("02", "08", "0000000000000231", "02", "08", "0000000000000451"), InvalidValue);
		do_test!(concat!("1f", "00", "0f", "01", "2a"), InvalidValue);
		do_test!(concat!("1f", "00", "1f", "01", "2a"), InvalidValue);

		// The last BOLT test modified to not require creating a new decoder for one trivial test.
		do_test!(concat!("ffffffffffffffffff", "00", "01", "00"), InvalidValue);
	}

	#[test]
	fn bolt_tlv_valid_n1_stream() {
		macro_rules! do_test {
			($stream: expr, $tlv1: expr, $tlv2: expr, $tlv3: expr, $tlv4: expr) => {
				if let Ok((tlv1, tlv2, tlv3, tlv4)) = tlv_reader_n1(&::hex::decode($stream).unwrap()[..]) {
					assert_eq!(tlv1.map(|v| v.0), $tlv1);
					assert_eq!(tlv2, $tlv2);
					assert_eq!(tlv3, $tlv3);
					assert_eq!(tlv4, $tlv4);
				} else { panic!(); }
			}
		}

		do_test!(concat!(""), None, None, None, None);
		do_test!(concat!("21", "00"), None, None, None, None);
		do_test!(concat!("fd0201", "00"), None, None, None, None);
		do_test!(concat!("fd00fd", "00"), None, None, None, None);
		do_test!(concat!("fd00ff", "00"), None, None, None, None);
		do_test!(concat!("fe02000001", "00"), None, None, None, None);
		do_test!(concat!("ff0200000000000001", "00"), None, None, None, None);

		do_test!(concat!("01", "00"), Some(0), None, None, None);
		do_test!(concat!("01", "01", "01"), Some(1), None, None, None);
		do_test!(concat!("01", "02", "0100"), Some(256), None, None, None);
		do_test!(concat!("01", "03", "010000"), Some(65536), None, None, None);
		do_test!(concat!("01", "04", "01000000"), Some(16777216), None, None, None);
		do_test!(concat!("01", "05", "0100000000"), Some(4294967296), None, None, None);
		do_test!(concat!("01", "06", "010000000000"), Some(1099511627776), None, None, None);
		do_test!(concat!("01", "07", "01000000000000"), Some(281474976710656), None, None, None);
		do_test!(concat!("01", "08", "0100000000000000"), Some(72057594037927936), None, None, None);
		do_test!(concat!("02", "08", "0000000000000226"), None, Some((0 << 30) | (0 << 5) | (550 << 0)), None, None);
		do_test!(concat!("03", "31", "023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb00000000000000010000000000000002"),
			None, None, Some((
				PublicKey::from_slice(&::hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]).unwrap(), 1, 2)),
			None);
		do_test!(concat!("fd00fe", "02", "0226"), None, None, None, Some(550));
	}

	fn do_simple_test_tlv_write() -> Result<(), ::std::io::Error> {
		let mut stream = VecWriter(Vec::new());

		stream.0.clear();
		encode_varint_length_prefixed_tlv!(&mut stream, { (1, 1u8) });
		assert_eq!(stream.0, ::hex::decode("03010101").unwrap());

		stream.0.clear();
		encode_varint_length_prefixed_tlv!(&mut stream, { (4, 0xabcdu16) });
		assert_eq!(stream.0, ::hex::decode("040402abcd").unwrap());

		stream.0.clear();
		encode_varint_length_prefixed_tlv!(&mut stream, { (0xff, 0xabcdu16) });
		assert_eq!(stream.0, ::hex::decode("06fd00ff02abcd").unwrap());

		stream.0.clear();
		encode_varint_length_prefixed_tlv!(&mut stream, { (0, 1u64), (0xff, HighZeroBytesDroppedVarInt(0u64)) });
		assert_eq!(stream.0, ::hex::decode("0e00080000000000000001fd00ff00").unwrap());

		Ok(())
	}

	#[test]
	fn simple_test_tlv_write() {
		do_simple_test_tlv_write().unwrap();
	}
}
|
||||
|
|
Loading…
Add table
Reference in a new issue