Improve logging of received data
The numbers did not match up between the delivered response size and the logged items, because we did not account for the overhead of the ProtectedStorageEntry (pub key + sig) and estimated the size by taking only the first item and multiplying it by the item count. A measurement showed about 20 ms for the exact calculation (toProtoMessage().getSerializedSize() has some cost). I guess that is acceptable to get correct metrics.
parent 0aeaa5386c
commit 9dfcc01acd
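To illustrate the accounting change described in the commit message: the old code serialized only the first item of each payload type and multiplied its size by the item count, while the new code sums the exact serialized size of every item (and measures the whole ProtectedStorageEntry, including pub key and signature, rather than only its payload). Below is a minimal, self-contained sketch of the two strategies, not Bisq code: the Payload class, its names and its sizes are made-up stand-ins, and in Bisq the size comes from networkPayload.toProtoMessage().getSerializedSize().

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SizeAccountingSketch {
    // Hypothetical stand-in for a network payload; in Bisq the size would come
    // from networkPayload.toProtoMessage().getSerializedSize().
    static class Payload {
        final String className;
        final int serializedSize;

        Payload(String className, int serializedSize) {
            this.className = className;
            this.serializedSize = serializedSize;
        }
    }

    // Old approach: take the serialized size of the first item per class and
    // multiply it by the item count (an estimate that drifts when item sizes vary).
    static Map<String, Long> estimatedBytesByClass(List<Payload> payloads) {
        Map<String, Integer> counts = new HashMap<>();
        Map<String, Integer> firstItemSize = new HashMap<>();
        for (Payload p : payloads) {
            firstItemSize.putIfAbsent(p.className, p.serializedSize);
            counts.merge(p.className, 1, Integer::sum);
        }
        Map<String, Long> result = new HashMap<>();
        counts.forEach((name, n) -> result.put(name, (long) firstItemSize.get(name) * n));
        return result;
    }

    // New approach: accumulate the exact serialized size of every item.
    static Map<String, Long> exactBytesByClass(List<Payload> payloads) {
        Map<String, Long> result = new HashMap<>();
        for (Payload p : payloads) {
            result.merge(p.className, (long) p.serializedSize, Long::sum);
        }
        return result;
    }

    public static void main(String[] args) {
        List<Payload> payloads = List.of(
                new Payload("PayloadA", 1_200),
                new Payload("PayloadA", 3_400),  // much larger than the first item
                new Payload("PayloadB", 90));
        System.out.println("estimated: " + estimatedBytesByClass(payloads)); // PayloadA=2400
        System.out.println("exact:     " + exactBytesByClass(payloads));     // PayloadA=4600
    }
}

In the diff below, the exact totals are kept per class name in a Tuple2<AtomicInteger, AtomicInteger> holding (item count, accumulated bytes), which is why the "≈" marker in the log line is no longer needed.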
@@ -32,10 +32,10 @@ import bisq.network.p2p.storage.payload.ProtectedStorageEntry;
 import bisq.common.Timer;
 import bisq.common.UserThread;
 import bisq.common.proto.network.NetworkEnvelope;
+import bisq.common.proto.network.NetworkPayload;
 import bisq.common.util.Tuple2;
 import bisq.common.util.Utilities;
 
-import com.google.common.collect.Streams;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.MoreExecutors;
@@ -43,7 +43,6 @@ import com.google.common.util.concurrent.SettableFuture;
 
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Objects;
 import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -232,20 +231,15 @@ class RequestDataHandler implements MessageListener {
     private void logContents(GetDataResponse getDataResponse) {
         Set<ProtectedStorageEntry> dataSet = getDataResponse.getDataSet();
         Set<PersistableNetworkPayload> persistableNetworkPayloadSet = getDataResponse.getPersistableNetworkPayloadSet();
-        Map<String, Tuple2<AtomicInteger, Integer>> numPayloadsByClassName = new HashMap<>();
-        Streams.concat(dataSet.stream().map(ProtectedStorageEntry::getProtectedStoragePayload).filter(Objects::nonNull),
-                persistableNetworkPayloadSet.stream())
-                .forEach(data -> {
-                    String className = data.getClass().getSimpleName();
-                    // The data.toProtoMessage().getSerializedSize() call is not cheap, so want to avoid to call it on
-                    // each object. As most objects of the same data type are expected to have a similar size,
-                    // we only take the first and multiply later to get the total size.
-                    // This is sufficient for the informational purpose of that log.
-                    numPayloadsByClassName.putIfAbsent(className, new Tuple2<>(new AtomicInteger(0),
-                            data.toProtoMessage().getSerializedSize()));
-                    numPayloadsByClassName.get(className).first.getAndIncrement();
-
-                });
+        Map<String, Tuple2<AtomicInteger, AtomicInteger>> numPayloadsByClassName = new HashMap<>();
+        dataSet.forEach(protectedStorageEntry -> {
+            String className = protectedStorageEntry.getProtectedStoragePayload().getClass().getSimpleName();
+            addDetails(numPayloadsByClassName, protectedStorageEntry, className);
+        });
+        persistableNetworkPayloadSet.forEach(persistableNetworkPayload -> {
+            String className = persistableNetworkPayload.getClass().getSimpleName();
+            addDetails(numPayloadsByClassName, persistableNetworkPayload, className);
+        });
         StringBuilder sb = new StringBuilder();
         String sep = System.lineSeparator();
         sb.append(sep).append("#################################################################").append(sep);
@@ -256,13 +250,21 @@ class RequestDataHandler implements MessageListener {
         numPayloadsByClassName.forEach((key, value) -> sb.append(key)
                 .append(": ")
                 .append(value.first.get())
-                .append(" / ≈")
-                .append(Utilities.readableFileSize(value.second * value.first.get()))
+                .append(" / ")
+                .append(Utilities.readableFileSize(value.second.get()))
                 .append(sep));
         sb.append("#################################################################");
         log.info(sb.toString());
     }
 
+    private void addDetails(Map<String, Tuple2<AtomicInteger, AtomicInteger>> numPayloadsByClassName,
+                            NetworkPayload networkPayload, String className) {
+        numPayloadsByClassName.putIfAbsent(className, new Tuple2<>(new AtomicInteger(0),
+                new AtomicInteger(0)));
+        numPayloadsByClassName.get(className).first.getAndIncrement();
+        numPayloadsByClassName.get(className).second.getAndAdd(networkPayload.toProtoMessage().getSerializedSize());
+    }
+
     @SuppressWarnings("UnusedParameters")
     private void handleFault(String errorMessage,
                              NodeAddress nodeAddress,