2022-12-24 23:49:50 +00:00
|
|
|
// Copyright (c) 2020-2022 The Bitcoin Core developers
|
2020-04-04 07:30:51 +08:00
|
|
|
// Distributed under the MIT software license, see the accompanying
|
|
|
|
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
|
|
|
|
|
|
|
#include <test/util/net.h>
|
|
|
|
|
|
|
|
#include <net.h>
|
2022-07-11 18:14:36 +02:00
|
|
|
#include <net_processing.h>
|
2023-10-27 18:53:13 -06:00
|
|
|
#include <netaddress.h>
|
2022-07-11 18:14:36 +02:00
|
|
|
#include <netmessagemaker.h>
|
2023-10-27 18:53:13 -06:00
|
|
|
#include <node/connection_types.h>
|
|
|
|
#include <node/eviction.h>
|
|
|
|
#include <protocol.h>
|
|
|
|
#include <random.h>
|
|
|
|
#include <serialize.h>
|
2021-07-01 12:48:51 +02:00
|
|
|
#include <span.h>
|
2022-12-06 13:42:03 +01:00
|
|
|
#include <sync.h>
|
2021-07-01 12:48:51 +02:00
|
|
|
|
2022-12-06 13:42:03 +01:00
|
|
|
#include <chrono>
|
|
|
|
#include <optional>
|
2021-07-01 12:48:51 +02:00
|
|
|
#include <vector>
|
2020-04-04 07:30:51 +08:00
|
|
|
|
2022-07-11 18:14:36 +02:00
|
|
|
// Simulate a P2P version handshake with `node` from the remote peer's side:
// initialize the node in the PeerManager, inject a VERSION message built from
// the given parameters, and — if `successfully_connected` — a VERACK as well.
// Asserts that the negotiated state (version, relay flag, services) matches
// what was sent. Returns early if the node got disconnected by the VERSION
// processing (e.g. an unacceptable version).
void ConnmanTestMsg::Handshake(CNode& node,
                               bool successfully_connected,
                               ServiceFlags remote_services,
                               ServiceFlags local_services,
                               int32_t version,
                               bool relay_txs)
{
    auto& peerman{static_cast<PeerManager&>(*m_msgproc)};
    auto& connman{*this};

    peerman.InitializeNode(node, local_services);
    peerman.SendMessages(&node);
    FlushSendBuffer(node); // Drop the version message added by SendMessages.

    // Build the VERSION message exactly as a remote peer would serialize it.
    CSerializedNetMsg msg_version{
        NetMsg::Make(NetMsgType::VERSION,
                     version,                                        //
                     Using<CustomUintFormatter<8>>(remote_services), //
                     int64_t{},                                      // dummy time
                     int64_t{},                                      // ignored service bits
                     CNetAddr::V1(CService{}),                       // dummy
                     int64_t{},                                      // ignored service bits
                     CNetAddr::V1(CService{}),                       // ignored
                     uint64_t{1},                                    // dummy nonce
                     std::string{},                                  // dummy subver
                     int32_t{},                                      // dummy starting_height
                     relay_txs),
    };

    (void)connman.ReceiveMsgFrom(node, std::move(msg_version));
    node.fPauseSend = false;
    connman.ProcessMessagesOnce(node);
    peerman.SendMessages(&node);
    FlushSendBuffer(node); // Drop the verack message added by SendMessages.
    if (node.fDisconnect) return;
    assert(node.nVersion == version);
    assert(node.GetCommonVersion() == std::min(version, PROTOCOL_VERSION));
    CNodeStateStats statestats;
    assert(peerman.GetNodeStateStats(node.GetId(), statestats));
    assert(statestats.m_relay_txs == (relay_txs && !node.IsBlockOnlyConn()));
    assert(statestats.their_services == remote_services);
    if (successfully_connected) {
        // Complete the handshake by also delivering the peer's VERACK.
        CSerializedNetMsg msg_verack{NetMsg::Make(NetMsgType::VERACK)};
        (void)connman.ReceiveMsgFrom(node, std::move(msg_verack));
        node.fPauseSend = false;
        connman.ProcessMessagesOnce(node);
        peerman.SendMessages(&node);
        assert(node.fSuccessfullyConnected == true);
    }
}
|
|
|
|
|
net: count traffic bytes and number of messages globally
Before this change only per-peer stats were gathered. They
are lost when the peer disconnects.
So, collect the traffic stats globally in `CConnman`, broken down by
* direction (sent or received, (2))
* network of the peer (IPv4, IPv6, Tor, I2P, CJDNS (5))
* connection type (inbound, full outbound, feeler, etc, (6))
* message type (verack, ping, etc, (36))
2023-11-22 15:03:49 +01:00
|
|
|
// Feed raw wire bytes to `node`'s receive logic, as if they were read from
// its socket. `complete` is set when a full message has been received; in
// that case the node's received messages are marked ready for processing.
void ConnmanTestMsg::NodeReceiveMsgBytes(CNode& node, Span<const uint8_t> msg_bytes, bool& complete)
{
    // Keep the side-effecting call outside of assert(): if this translation
    // unit were ever compiled with NDEBUG, the bytes would otherwise be
    // silently dropped.
    const bool ok{node.ReceiveMsgBytes(msg_bytes, complete, m_net_stats)};
    assert(ok);
    if (complete) {
        node.MarkReceivedMsgsForProcessing();
    }
}
|
|
|
|
|
net: move message conversion to wire bytes from PushMessage to SocketSendData
This furthers transport abstraction by removing the assumption that a message
can always immediately be converted to wire bytes. This assumption does not hold
for the v2 transport proposed by BIP324, as no messages can be sent before the
handshake completes.
This is done by only keeping (complete) CSerializedNetMsg objects in vSendMsg,
rather than the resulting bytes (for header and payload) that need to be sent.
In SocketSendData, these objects are handed to the transport as permitted by it,
and sending out the bytes the transport tells us to send. This also removes the
nSendOffset member variable in CNode, as keeping track of how much has been sent
is now a responsability of the transport.
This is not a pure refactor, and has the following effects even for the current
v1 transport:
* Checksum calculation now happens in SocketSendData rather than PushMessage.
For non-optimistic-send messages, that means this computation now happens in
the network thread rather than the message handler thread (generally a good
thing, as the message handler thread is more of a computational bottleneck).
* Checksum calculation now happens while holding the cs_vSend lock. This is
technically unnecessary for the v1 transport, as messages are encoded
independent from one another, but is untenable for the v2 transport anyway.
* Statistics updates about per-message sent bytes now happen when those bytes
are actually handed to the OS, rather than at PushMessage time.
2023-08-16 13:31:50 -04:00
|
|
|
// Discard all queued outbound data on `node`: clear the per-node send queue
// and drain whatever bytes the transport still wants to put on the wire, by
// pretending they were sent.
void ConnmanTestMsg::FlushSendBuffer(CNode& node) const
{
    LOCK(node.cs_vSend);
    node.vSendMsg.clear();
    node.m_send_memusage = 0;
    for (;;) {
        // No further message is queued at this point, hence have_next_message=false.
        const auto& [bytes, _more, _msg_type] = node.m_transport->GetBytesToSend(false);
        if (bytes.empty()) break;
        node.m_transport->MarkBytesSent(bytes.size());
    }
}
|
|
|
|
|
net: count traffic bytes and number of messages globally
Before this change only per-peer stats were gathered. They
are lost when the peer disconnects.
So, collect the traffic stats globally in `CConnman`, broken down by
* direction (sent or received, (2))
* network of the peer (IPv4, IPv6, Tor, I2P, CJDNS (5))
* connection type (inbound, full outbound, feeler, etc, (6))
* message type (verack, ping, etc, (36))
2023-11-22 15:03:49 +01:00
|
|
|
// Deliver `ser_msg` to `node` as if it arrived over the network: run it
// through the node's transport to obtain the wire bytes, then feed those
// bytes back into the node's receive path. Returns whether a complete
// message was received.
bool ConnmanTestMsg::ReceiveMsgFrom(CNode& node, CSerializedNetMsg&& ser_msg)
{
    const bool queued{node.m_transport->SetMessageToSend(ser_msg)};
    assert(queued);
    bool complete{false};
    for (;;) {
        const auto& [bytes, _more, _msg_type] = node.m_transport->GetBytesToSend(false);
        if (bytes.empty()) break;
        NodeReceiveMsgBytes(node, bytes, complete);
        node.m_transport->MarkBytesSent(bytes.size());
    }
    return complete;
}
|
2021-07-01 12:48:51 +02:00
|
|
|
|
2023-10-27 18:53:13 -06:00
|
|
|
// Open an outbound (v2-transport) test connection to `pszDest`, register the
// node with the PeerManager and this connman, and pretend the handshake has
// already completed. Returns nullptr if the connection could not be made.
CNode* ConnmanTestMsg::ConnectNodePublic(PeerManager& peerman, const char* pszDest, ConnectionType conn_type)
{
    CNode* const node{ConnectNode(CAddress{}, pszDest, /*fCountFailure=*/false, conn_type, /*use_v2transport=*/true)};
    if (node == nullptr) return nullptr;
    node->SetCommonVersion(PROTOCOL_VERSION);
    peerman.InitializeNode(*node, ServiceFlags(NODE_NETWORK | NODE_WITNESS));
    node->fSuccessfullyConnected = true;
    AddTestNode(*node);
    return node;
}
|
|
|
|
|
2021-07-01 12:48:51 +02:00
|
|
|
// Build `n_candidates` eviction candidates with randomized attributes for
// exercising the eviction logic. All randomness is drawn from
// `random_context`, so a seeded context yields a reproducible vector.
std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(int n_candidates, FastRandomContext& random_context)
{
    std::vector<NodeEvictionCandidate> candidates;
    candidates.reserve(n_candidates);
    for (int id = 0; id < n_candidates; ++id) {
        // NOTE: designated initializers are evaluated in declaration order,
        // so the sequence of randrange()/randbool() draws (and thus the
        // values produced for a given seed) depends on this field order.
        candidates.push_back({
            .id=id,
            .m_connected=std::chrono::seconds{random_context.randrange(100)},
            .m_min_ping_time=std::chrono::microseconds{random_context.randrange(100)},
            .m_last_block_time=std::chrono::seconds{random_context.randrange(100)},
            .m_last_tx_time=std::chrono::seconds{random_context.randrange(100)},
            .fRelevantServices=random_context.randbool(),
            .m_relay_txs=random_context.randbool(),
            .fBloomFilter=random_context.randbool(),
            .nKeyedNetGroup=random_context.randrange(100u),
            .prefer_evict=random_context.randbool(),
            .m_is_local=random_context.randbool(),
            .m_network=ALL_NETWORKS[random_context.randrange(ALL_NETWORKS.size())],
            .m_noban=false,
            .m_conn_type=ConnectionType::INBOUND,
        });
    }
    return candidates;
}
|
2022-12-06 13:38:21 +01:00
|
|
|
|
2024-09-06 11:16:50 +02:00
|
|
|
// Have different ZeroSock (or others that inherit from it) objects have different
// m_socket because EqualSharedPtrSock compares m_socket and we want to avoid two
// different objects comparing as equal.
static std::atomic<SOCKET> g_mocked_sock_fd{0};

// Each ZeroSock takes the next fake fd; the counter only ever increases.
ZeroSock::ZeroSock() : Sock{g_mocked_sock_fd++} {}

// Sock::~Sock() would try to close(2) m_socket if it is not INVALID_SOCKET, avoid that.
ZeroSock::~ZeroSock() { m_socket = INVALID_SOCKET; }

// Pretend the whole buffer was sent; the data itself is discarded.
ssize_t ZeroSock::Send(const void*, size_t len, int) const { return len; }
|
2022-12-06 13:38:21 +01:00
|
|
|
|
2024-09-06 11:16:50 +02:00
|
|
|
// Pretend `len` bytes were received, all of them zero.
ssize_t ZeroSock::Recv(void* buf, size_t len, int flags) const
{
    std::memset(buf, 0, len);
    return len;
}
|
|
|
|
|
2024-09-06 11:16:50 +02:00
|
|
|
// Connection-management calls trivially succeed on a ZeroSock.
int ZeroSock::Connect(const sockaddr*, socklen_t) const { return 0; }

int ZeroSock::Bind(const sockaddr*, socklen_t) const { return 0; }

int ZeroSock::Listen(int) const { return 0; }
|
2022-12-06 13:38:21 +01:00
|
|
|
|
2024-09-06 11:16:50 +02:00
|
|
|
// Accept a fake connection: if the caller asked for the peer address, fill in
// a fixed dummy (5.5.5.5:6789), then hand back a fresh ZeroSock.
std::unique_ptr<Sock> ZeroSock::Accept(sockaddr* addr, socklen_t* addr_len) const
{
    if (addr != nullptr) {
        // Pretend all connections come from 5.5.5.5:6789
        memset(addr, 0x00, *addr_len);
        const socklen_t write_len = static_cast<socklen_t>(sizeof(sockaddr_in));
        // Only write the address if the caller's buffer can hold a full
        // sockaddr_in; otherwise leave it zeroed and *addr_len untouched.
        if (*addr_len >= write_len) {
            *addr_len = write_len;
            sockaddr_in* addr_in = reinterpret_cast<sockaddr_in*>(addr);
            addr_in->sin_family = AF_INET;
            // 0x05 in every byte of sin_addr yields the IPv4 address 5.5.5.5.
            memset(&addr_in->sin_addr, 0x05, sizeof(addr_in->sin_addr));
            addr_in->sin_port = htons(6789);
        }
    }
    return std::make_unique<ZeroSock>();
}
|
2022-12-06 13:38:21 +01:00
|
|
|
|
2024-09-06 11:16:50 +02:00
|
|
|
// Report zeroed-out socket option values and success.
int ZeroSock::GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const
{
    std::memset(opt_val, 0x0, *opt_len);
    return 0;
}

// Setting options is a no-op that reports success.
int ZeroSock::SetSockOpt(int, int, const void*, socklen_t) const { return 0; }

// Report an all-zero local address and success.
int ZeroSock::GetSockName(sockaddr* name, socklen_t* name_len) const
{
    std::memset(name, 0x0, *name_len);
    return 0;
}

// Mode changes trivially succeed; a ZeroSock never blocks anyway.
bool ZeroSock::SetNonBlocking() const { return true; }

bool ZeroSock::IsSelectable() const { return true; }
|
2022-12-06 13:38:21 +01:00
|
|
|
|
2024-09-06 11:16:50 +02:00
|
|
|
bool ZeroSock::Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred) const
|
2022-12-06 13:38:21 +01:00
|
|
|
{
|
|
|
|
if (occurred != nullptr) {
|
|
|
|
*occurred = requested;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2024-09-06 11:16:50 +02:00
|
|
|
bool ZeroSock::WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const
|
2022-12-06 13:38:21 +01:00
|
|
|
{
|
|
|
|
for (auto& [sock, events] : events_per_sock) {
|
|
|
|
(void)sock;
|
|
|
|
events.occurred = events.requested;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2024-09-06 11:16:50 +02:00
|
|
|
// Moving an arbitrary Sock into a ZeroSock would discard the mock behavior,
// so it is forbidden outright.
ZeroSock& ZeroSock::operator=(Sock&& other)
{
    assert(false && "Move of Sock into ZeroSock not allowed.");
    return *this;
}
|
|
|
|
|
|
|
|
StaticContentsSock::StaticContentsSock(const std::string& contents)
|
|
|
|
: m_contents{contents}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ssize_t StaticContentsSock::Recv(void* buf, size_t len, int flags) const
|
|
|
|
{
|
|
|
|
const size_t consume_bytes{std::min(len, m_contents.size() - m_consumed)};
|
|
|
|
std::memcpy(buf, m_contents.data() + m_consumed, consume_bytes);
|
|
|
|
if ((flags & MSG_PEEK) == 0) {
|
|
|
|
m_consumed += consume_bytes;
|
|
|
|
}
|
|
|
|
return consume_bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Moving an arbitrary Sock into a StaticContentsSock would discard the mocked
// contents, so it is forbidden outright.
StaticContentsSock& StaticContentsSock::operator=(Sock&& other)
{
    assert(false && "Move of Sock into StaticContentsSock not allowed.");
    return *this;
}
|
2022-12-06 13:42:03 +01:00
|
|
|
|
|
|
|
ssize_t DynSock::Pipe::GetBytes(void* buf, size_t len, int flags)
|
|
|
|
{
|
|
|
|
WAIT_LOCK(m_mutex, lock);
|
|
|
|
|
|
|
|
if (m_data.empty()) {
|
|
|
|
if (m_eof) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
errno = EAGAIN; // Same as recv(2) on a non-blocking socket.
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
const size_t read_bytes{std::min(len, m_data.size())};
|
|
|
|
|
|
|
|
std::memcpy(buf, m_data.data(), read_bytes);
|
|
|
|
if ((flags & MSG_PEEK) == 0) {
|
|
|
|
m_data.erase(m_data.begin(), m_data.begin() + read_bytes);
|
|
|
|
}
|
|
|
|
|
|
|
|
return read_bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Block until a full (v1-serialized) P2P message can be parsed from the
// pipe's data and return it. Returns std::nullopt on EOF before a complete
// message arrives, or if the transport rejects the bytes/message.
std::optional<CNetMessage> DynSock::Pipe::GetNetMsg()
{
    // A throwaway transport instance used purely for deserialization.
    V1Transport transport{NodeId{0}};

    {
        WAIT_LOCK(m_mutex, lock);

        WaitForDataOrEof(lock);
        if (m_eof && m_data.empty()) {
            return std::nullopt;
        }

        for (;;) {
            Span<const uint8_t> s{m_data};
            if (!transport.ReceivedBytes(s)) { // Consumed bytes are removed from the front of s.
                return std::nullopt;
            }
            // Drop from m_data exactly the prefix the transport consumed.
            m_data.erase(m_data.begin(), m_data.begin() + m_data.size() - s.size());
            if (transport.ReceivedMessageComplete()) {
                break;
            }
            // Not complete yet: wait for more bytes (or EOF) before retrying.
            if (m_data.empty()) {
                WaitForDataOrEof(lock);
                if (m_eof && m_data.empty()) {
                    return std::nullopt;
                }
            }
        }
    }

    // Extract the message outside the lock; no further pipe access needed.
    bool reject{false};
    CNetMessage msg{transport.GetReceivedMessage(/*time=*/{}, reject)};
    if (reject) {
        return std::nullopt;
    }
    return std::make_optional<CNetMessage>(std::move(msg));
}
|
|
|
|
|
|
|
|
void DynSock::Pipe::PushBytes(const void* buf, size_t len)
|
|
|
|
{
|
|
|
|
LOCK(m_mutex);
|
|
|
|
const uint8_t* b = static_cast<const uint8_t*>(buf);
|
|
|
|
m_data.insert(m_data.end(), b, b + len);
|
|
|
|
m_cond.notify_all();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Signal end-of-file on the pipe and wake up any waiting readers, so blocked
// GetBytes()/GetNetMsg() calls can observe the EOF and return.
void DynSock::Pipe::Eof()
{
    LOCK(m_mutex);
    m_eof = true;
    m_cond.notify_all();
}
|
|
|
|
|
|
|
|
// Block until the pipe has data or has been marked EOF. `lock` must hold
// m_mutex; the condition variable releases it while waiting and re-acquires
// it before returning.
void DynSock::Pipe::WaitForDataOrEof(UniqueLock<Mutex>& lock)
{
    Assert(lock.mutex() == &m_mutex);

    m_cond.wait(lock, [&]() EXCLUSIVE_LOCKS_REQUIRED(m_mutex) {
        AssertLockHeld(m_mutex);
        return !m_data.empty() || m_eof;
    });
}
|
|
|
|
|
|
|
|
// Wire up the recv/send pipes and the queue of sockets that Accept() will
// hand out; both are shared with the test driving this socket.
DynSock::DynSock(std::shared_ptr<Pipes> pipes, std::shared_ptr<Queue> accept_sockets)
    : m_pipes{pipes}, m_accept_sockets{accept_sockets}
{
}
|
|
|
|
|
|
|
|
// Mark the "send" pipe as EOF so a reader on the other end of this socket
// does not block forever once the socket is destroyed.
DynSock::~DynSock()
{
    m_pipes->send.Eof();
}
|
|
|
|
|
|
|
|
// Reads are served from the "recv" pipe (filled by the test via
// m_pipes->recv.PushBytes()); semantics follow Pipe::GetBytes().
ssize_t DynSock::Recv(void* buf, size_t len, int flags) const
{
    return m_pipes->recv.GetBytes(buf, len, flags);
}
|
|
|
|
|
|
|
|
// Sent bytes are captured in the "send" pipe for later inspection by the
// test; always reports full success.
ssize_t DynSock::Send(const void* buf, size_t len, int) const
{
    m_pipes->send.PushBytes(buf, len);
    return len;
}
|
|
|
|
|
|
|
|
// Let ZeroSock::Accept() fill in the dummy peer address, then hand out the
// next pre-arranged socket from the accept queue (nullptr if none).
std::unique_ptr<Sock> DynSock::Accept(sockaddr* addr, socklen_t* addr_len) const
{
    ZeroSock::Accept(addr, addr_len);
    return m_accept_sockets->Pop().value_or(nullptr);
}
|
|
|
|
|
|
|
|
bool DynSock::Wait(std::chrono::milliseconds timeout,
|
|
|
|
Event requested,
|
|
|
|
Event* occurred) const
|
|
|
|
{
|
|
|
|
EventsPerSock ev;
|
|
|
|
ev.emplace(this, Events{requested});
|
|
|
|
const bool ret{WaitMany(timeout, ev)};
|
|
|
|
if (occurred != nullptr) {
|
|
|
|
*occurred = ev.begin()->second.occurred;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Poll-and-sleep implementation of WaitMany(): repeatedly check readiness of
// all sockets until at least one event occurred or `timeout` elapsed. SEND is
// always ready; RECV is ready when the recv pipe has at least one byte
// (checked non-destructively with MSG_PEEK) or an accept socket is queued.
// Always returns true, like a successful poll(2).
bool DynSock::WaitMany(std::chrono::milliseconds timeout, EventsPerSock& events_per_sock) const
{
    const auto deadline = std::chrono::steady_clock::now() + timeout;
    bool at_least_one_event_occurred{false};

    for (;;) {
        // Check all sockets for readiness without waiting.
        for (auto& [sock, events] : events_per_sock) {
            if ((events.requested & Sock::SEND) != 0) {
                // Always ready for Send().
                events.occurred |= Sock::SEND;
                at_least_one_event_occurred = true;
            }

            if ((events.requested & Sock::RECV) != 0) {
                // NOTE(review): assumes every socket in the map is a DynSock;
                // the cast is unchecked.
                auto dyn_sock = reinterpret_cast<const DynSock*>(sock.get());
                uint8_t b;
                if (dyn_sock->m_pipes->recv.GetBytes(&b, 1, MSG_PEEK) == 1 || !dyn_sock->m_accept_sockets->Empty()) {
                    events.occurred |= Sock::RECV;
                    at_least_one_event_occurred = true;
                }
            }
        }

        if (at_least_one_event_occurred || std::chrono::steady_clock::now() > deadline) {
            break;
        }

        // Nothing ready yet and the deadline has not passed: back off briefly.
        std::this_thread::sleep_for(10ms);
    }

    return true;
}
|
|
|
|
|
|
|
|
// Moving an arbitrary Sock into a DynSock would discard the pipes and accept
// queue, so it is forbidden outright.
DynSock& DynSock::operator=(Sock&&)
{
    assert(false && "Move of Sock into DynSock not allowed.");
    return *this;
}
|