mirror of https://github.com/bisq-network/bisq.git (synced 2024-11-19 18:03:12 +01:00)

Merge branch 'master' into Development

This commit is contained in:
commit c8b216a890
README.md (11 changed lines)
@@ -33,12 +33,13 @@ Staying in Touch
 
 Contact the team and keep up to date using any of the following:
 
-- The [Bisq Website](https://bisq.network)
+- The [Bisq website](https://bisq.network)
 - GitHub [Issues](https://github.com/bisq-network/exchange/issues)
-- The [Bisq Forum]( https://forum.bisq.network)
-- The [#bitsquare](https://webchat.freenode.net/?channels=bitsquare) IRC channel on Freenode ([logs](https://botbot.me/freenode/bitsquare))
-- Our [mailing list](https://groups.google.com/forum/#!forum/bitsquare)
-- [@Bitsquare_](https://twitter.com/bitsquare_) on Twitter
+- The [Bisq forum]( https://bisq.community)
+- The [#bisq](https://webchat.freenode.net/?channels=bisq) IRC channel on Freenode
+- Our [contributor mailing list](https://lists.bisq.network/listinfo/bisq-contrib)
+- [@bisq_network](https://twitter.com/bisq_network) on Twitter
+- The [Bisq newsletter](https://eepurl.com/5uQf9)
 
 License
@@ -88,6 +88,13 @@
             <artifactId>maven-shade-plugin</artifactId>
             <version>3.1.0</version>
             <configuration>
+                <!-- For the binary build that bouncycastle exclusion need to be removed. -->
+                <artifactSet>
+                    <excludes>
+                        <exclude>org.bouncycastle:*:*:*</exclude>
+                    </excludes>
+                </artifactSet>
+
                 <!-- broken with Java 8 (MSHADE-174), using ProGuard instead. -->
                 <minimizeJar>false</minimizeJar>
                 <transformers>
@@ -146,7 +146,7 @@ public class BackupView extends ActivatableView<GridPane, Void> {
         String backupDirectory = preferences.getBackupDirectory();
         if (backupDirectory != null && backupDirectory.length() > 0) {
             try {
-                String dateString = new SimpleDateFormat("YYYY-MM-dd-HHmmss").format(new Date());
+                String dateString = new SimpleDateFormat("yyyy-MM-dd-HHmmss").format(new Date());
                 String destination = Paths.get(backupDirectory, "bisq_backup_" + dateString).toString();
                 FileUtils.copyDirectory(dataDir,
                         new File(destination));
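The one-character change above fixes a subtle bug: in SimpleDateFormat, uppercase "YYYY" is the week-based year, which can differ from the calendar year around New Year, so a backup created in late December could carry next year's number in its file name. A standalone sketch (not part of the commit) showing the difference in a US-style locale:

    import java.text.SimpleDateFormat;
    import java.util.Calendar;
    import java.util.Date;
    import java.util.Locale;

    public class WeekYearPitfall {
        public static void main(String[] args) {
            Calendar cal = Calendar.getInstance(Locale.US);
            cal.set(2017, Calendar.DECEMBER, 31);
            Date date = cal.getTime();
            // "YYYY" is the week year: Dec 31, 2017 already falls into week 1 of 2018 here
            System.out.println(new SimpleDateFormat("YYYY-MM-dd", Locale.US).format(date));
            // "yyyy" is the calendar year and prints 2017, as wanted for a backup name
            System.out.println(new SimpleDateFormat("yyyy-MM-dd", Locale.US).format(date));
        }
    }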
@@ -226,7 +226,9 @@ public class SeedWordsView extends ActivatableView<GridPane, Void> {
     }
 
     private void doRestore() {
-        long date = restoreDatePicker.getValue().atStartOfDay().toEpochSecond(ZoneOffset.UTC);
+        final LocalDate value = restoreDatePicker.getValue();
+        //TODO Is ZoneOffset correct?
+        long date = value != null ? value.atStartOfDay().toEpochSecond(ZoneOffset.UTC) : 0;
         DeterministicSeed seed = new DeterministicSeed(Splitter.on(" ").splitToList(seedWordsTextArea.getText()), null, "", date);
         walletsManager.restoreSeedWords(
                 seed,
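The new null guard matters because DatePicker.getValue() returns null when no date was picked; atStartOfDay().toEpochSecond(ZoneOffset.UTC) then turns the selected day into the seconds-since-epoch value passed to bitcoinj's DeterministicSeed as the seed creation time. A minimal sketch of the conversion, assuming a fixed sample date in place of the picker value:

    import java.time.LocalDate;
    import java.time.ZoneOffset;

    public class EpochSecondsSketch {
        public static void main(String[] args) {
            LocalDate value = LocalDate.of(2017, 12, 1); // stand-in for restoreDatePicker.getValue()
            // Fall back to 0 (the epoch) when no date was picked, as the diff does
            long date = value != null ? value.atStartOfDay().toEpochSecond(ZoneOffset.UTC) : 0;
            System.out.println(date); // prints 1512086400 for 2017-12-01T00:00Z
        }
    }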
@@ -322,6 +322,12 @@ public class TraderDisputeView extends ActivatableView<VBox, Void> {
                             .onAddAlertMessage(privateNotificationManager::sendPrivateNotificationMessageIfKeyIsValid)
                             .show();
                 }
+            } else if (Utilities.isAltOrCtrlPressed(KeyCode.ENTER, event)) {
+                if (selectedDispute != null) {
+                    if (messagesInputBox.isVisible() && inputTextArea.isFocused()) {
+                        onTrySendMessage();
+                    }
+                }
             }
         };
     }

@@ -425,6 +431,21 @@ public class TraderDisputeView extends ActivatableView<VBox, Void> {
         contractWindow.show(dispute);
     }
 
+    private void onTrySendMessage() {
+        if (p2PService.isBootstrapped()) {
+            String text = inputTextArea.getText();
+            if (!text.isEmpty()) {
+                if (text.length() < 5_000) {
+                    onSendMessage(text, selectedDispute);
+                } else {
+                    new Popup<>().information(Res.get("popup.warning.messageTooLong")).show();
+                }
+            }
+        } else {
+            new Popup<>().information(Res.get("popup.warning.notFullyConnected")).show();
+        }
+    }
+
     private void onSendMessage(String inputText, Dispute dispute) {
         if (disputeCommunicationMessage != null) {
             disputeCommunicationMessage.arrivedProperty().removeListener(arrivedPropertyListener);

@@ -627,18 +648,7 @@ public class TraderDisputeView extends ActivatableView<VBox, Void> {
 
         sendButton = new Button(Res.get("support.send"));
         sendButton.setDefaultButton(true);
-        sendButton.setOnAction(e -> {
-            if (p2PService.isBootstrapped()) {
-                String text = inputTextArea.getText();
-                if (!text.isEmpty())
-                    if (text.length() < 5_000)
-                        onSendMessage(text, selectedDispute);
-                    else
-                        new Popup<>().information(Res.get("popup.warning.messageTooLong")).show();
-            } else {
-                new Popup<>().information(Res.get("popup.warning.notFullyConnected")).show();
-            }
-        });
+        sendButton.setOnAction(e -> onTrySendMessage());
         inputTextAreaTextSubscription = EasyBind.subscribe(inputTextArea.textProperty(), t -> sendButton.setDisable(t.isEmpty()));
 
         Button uploadButton = new Button(Res.get("support.addAttachments"));

@@ -340,7 +340,9 @@ public class WalletPasswordWindow extends Overlay<WalletPasswordWindow> {
     }
 
     private void doRestore() {
-        long date = datePicker.getValue().atStartOfDay().toEpochSecond(ZoneOffset.UTC);
+        final LocalDate value = datePicker.getValue();
+        //TODO Is ZoneOffset correct?
+        long date = value != null ? value.atStartOfDay().toEpochSecond(ZoneOffset.UTC) : 0;
         DeterministicSeed seed = new DeterministicSeed(Splitter.on(" ").splitToList(seedWordsTextArea.getText()), null, "", date);
         walletsManager.restoreSeedWords(
                 seed,
@@ -10,24 +10,26 @@
         <appender-ref ref="CONSOLE_APPENDER"/>
     </root>
 
-    <!--
-    <logger name="com.neemre.btcdcli4j" level="WARN"/>
-    <logger name="org.berndpruenster" level="WARN"/>
-
+    <logger name="com.msopentech.thali.toronionproxy.OnionProxyManagerEventHandler" level="INFO"/>
+    <!--
+    <logger name="com.neemre.btcdcli4j" level="WARN"/>
+
-    <logger name="org.bitcoinj.core.AbstractBlockChain" level="WARN"/>
-    <logger name="org.bitcoinj.net.BlockingClient" level="WARN"/>
-    <logger name="org.bitcoinj.core.PeerGroup" level="WARN"/>
-    <logger name="org.bitcoinj.core.Peer" level="WARN"/>
-    <logger name="org.bitcoinj.core.Context" level="WARN"/>
-    <logger name="org.bitcoinj.wallet.WalletFiles" level="WARN"/>
-    <logger name="org.bitcoinj.core.listeners.DownloadProgressTracker" level="WARN"/>
-    <logger name="org.bitcoinj.core.PeerSocketHandler" level="WARN"/>
-    <logger name="org.bitcoinj.net.NioClientManager" level="WARN"/>
-    <logger name="com.msopentech.thali.toronionproxy.OnionProxyManagerEventHandler" level="INFO"/>
-
-    <!– We get too many errors logged from connection issues–>
-    <logger name="org.bitcoinj.net.BlockingClient" level="OFF"/>
-    -->
+    <logger name="org.bitcoinj.core.AbstractBlockChain" level="WARN"/>
+    <logger name="org.bitcoinj.net.BlockingClient" level="WARN"/>
+    <logger name="org.bitcoinj.core.PeerGroup" level="WARN"/>
+    <logger name="org.bitcoinj.core.Peer" level="WARN"/>
+    <logger name="org.bitcoinj.core.Context" level="WARN"/>
+    <logger name="org.bitcoinj.wallet.WalletFiles" level="WARN"/>
+    <logger name="org.bitcoinj.core.listeners.DownloadProgressTracker" level="WARN"/>
+    <logger name="org.bitcoinj.core.PeerSocketHandler" level="WARN"/>
+    <logger name="org.bitcoinj.net.NioClientManager" level="WARN"/>
+
+    <!– We get too many errors logged from connection issues–>
+    <logger name="org.bitcoinj.net.BlockingClient" level="OFF"/>
+    -->
 
     <!--
     <logger name="org.bitcoinj.net.ConnectionHandler" level="WARN"/>
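A note on the odd-looking <!– … –> lines kept verbatim in the block above: they are not mojibake. XML forbids the sequence "--" inside a comment, so a nested <!-- ... --> within the big commented-out region would be malformed; the en-dash variants appear to be a deliberate workaround that keeps the note readable as plain text inside the outer comment.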
@@ -88,6 +88,11 @@
             <artifactId>maven-shade-plugin</artifactId>
             <version>3.1.0</version>
             <configuration>
+                <artifactSet>
+                    <excludes>
+                        <exclude>org.bouncycastle:*:*:*</exclude>
+                    </excludes>
+                </artifactSet>
                 <!-- broken with Java 8 (MSHADE-174), using ProGuard instead. -->
                 <minimizeJar>false</minimizeJar>
                 <transformers>
@@ -96,7 +96,7 @@ public class MonitorMain extends BisqExecutable {
         // In order to work around a bug in JavaFX 8u25 and below, you must include the following code as the first line of your realMain method:
         Thread.currentThread().setContextClassLoader(MonitorMain.class.getClassLoader());
 
-        port(8080);
+        port(80);
         get("/", (req, res) -> {
             log.info("Incoming request from: " + req.userAgent());
             return seedNodeMonitor.getMetricsModel().getResultAsHtml();
@@ -57,6 +57,8 @@ public class MetricsModel {
     private List<Peer> connectedPeers;
     private Map<Tuple2<BitcoinNodes.BtcNode, Boolean>, Integer> btcNodeDownTimeMap = new HashMap<>();
     private Map<Tuple2<BitcoinNodes.BtcNode, Boolean>, Integer> btcNodeUpTimeMap = new HashMap<>();
+    @Getter
+    private Set<NodeAddress> nodesInError = new HashSet<>();
 
     @Inject
     public MetricsModel(SeedNodesRepository seedNodesRepository,

@@ -403,4 +405,11 @@ public class MetricsModel {
         duration = duration.replace(", ", "");
         return duration;
     }
+
+    public void addNodesInError(NodeAddress nodeAddress) {
+        nodesInError.add(nodeAddress);
+    }
+    public void removeNodesInError(NodeAddress nodeAddress) {
+        nodesInError.remove(nodeAddress);
+    }
 }
@@ -53,7 +53,7 @@ class MonitorRequestHandler implements MessageListener {
 
     private final NetworkNode networkNode;
     private P2PDataStorage dataStorage;
-    private Metrics metric;
+    private Metrics metrics;
     private final Listener listener;
     private Timer timeoutTimer;
     private final int nonce = new Random().nextInt();

@@ -64,10 +64,10 @@ class MonitorRequestHandler implements MessageListener {
     // Constructor
     ///////////////////////////////////////////////////////////////////////////////////////////
 
-    public MonitorRequestHandler(NetworkNode networkNode, P2PDataStorage dataStorage, Metrics metric, Listener listener) {
+    public MonitorRequestHandler(NetworkNode networkNode, P2PDataStorage dataStorage, Metrics metrics, Listener listener) {
         this.networkNode = networkNode;
         this.dataStorage = dataStorage;
-        this.metric = metric;
+        this.metrics = metrics;
         this.listener = listener;
     }

@@ -214,12 +214,12 @@ class MonitorRequestHandler implements MessageListener {
                 });
                 sb.append("#################################################################");
                 log.info(sb.toString());
-                metric.getReceivedObjectsList().add(receivedObjects);
+                metrics.getReceivedObjectsList().add(receivedObjects);
 
                 final long duration = new Date().getTime() - requestTs;
                 log.info("Requesting data took {} ms", duration);
-                metric.getRequestDurations().add(duration);
-                metric.getErrorMessages().add(arbitratorReceived[0] ? "" : "No Arbitrator objects received! Seed node need to be restarted!");
+                metrics.getRequestDurations().add(duration);
+                metrics.getErrorMessages().add(arbitratorReceived[0] ? "" : "No Arbitrator objects received! Seed node need to be restarted!");
 
                 cleanup();
                 connection.shutDown(CloseConnectionReason.CLOSE_REQUESTED_BY_PEER, listener::onComplete);

@@ -247,13 +247,11 @@ class MonitorRequestHandler implements MessageListener {
 
     private void handleFault(String errorMessage, NodeAddress nodeAddress, CloseConnectionReason closeConnectionReason) {
         cleanup();
-        metric.getErrorMessages().add(errorMessage + " (" + new Date().toString() + ")");
-
-        // In case we would have already a connection we close it
-        networkNode.getAllConnections().stream()
-                .filter(connection -> connection.getPeersNodeAddressOptional().isPresent() && connection.getPeersNodeAddressOptional().get().equals(nodeAddress))
-                .forEach(c -> c.shutDown(closeConnectionReason));
+        // We do not log every error only if it fails several times in a row.
 
+        // We do not close the connection as it might be we have opened a new connection for that peer and
+        // we don't want to close that. We do not know the connection at fault as the fault handler does not contain that,
+        // so we could only search for connections for that nodeAddress but that would close an new connection attempt.
         listener.onFault(errorMessage, nodeAddress);
     }
@@ -23,10 +23,10 @@ import java.util.concurrent.TimeUnit;
 
 @Slf4j
 public class MonitorRequestManager implements ConnectionListener {
-    private static final long RETRY_DELAY_SEC = 20;
+    private static final long RETRY_DELAY_SEC = 30;
     private static final long CLEANUP_TIMER = 60;
     private static final long REQUEST_PERIOD_MIN = 10;
-    private static final int MAX_RETRIES = 6;
+    private static final int MAX_RETRIES = 3;
 
 
     ///////////////////////////////////////////////////////////////////////////////////////////

@@ -46,7 +46,6 @@ public class MonitorRequestManager implements ConnectionListener {
     private Map<NodeAddress, Timer> retryTimerMap = new HashMap<>();
     private Map<NodeAddress, Integer> retryCounterMap = new HashMap<>();
     private boolean stopped;
-    private Set<NodeAddress> nodesInError = new HashSet<>();
     private int completedRequestIndex;
 
     ///////////////////////////////////////////////////////////////////////////////////////////

@@ -132,9 +131,10 @@ public class MonitorRequestManager implements ConnectionListener {
     private void requestFromNode(NodeAddress nodeAddress) {
         if (!stopped) {
            if (!handlerMap.containsKey(nodeAddress)) {
+                final Metrics metrics = metricsModel.getMetrics(nodeAddress);
                 MonitorRequestHandler requestDataHandler = new MonitorRequestHandler(networkNode,
                         dataStorage,
-                        metricsModel.getMetrics(nodeAddress),
+                        metrics,
                         new MonitorRequestHandler.Listener() {
                             @Override
                             public void onComplete() {

@@ -151,8 +151,8 @@ public class MonitorRequestManager implements ConnectionListener {
                                 if (completedRequestIndex == numNodes)
                                     metricsModel.log();
 
-                                if (nodesInError.contains(nodeAddress)) {
-                                    nodesInError.remove(nodeAddress);
+                                if (metricsModel.getNodesInError().contains(nodeAddress)) {
+                                    metricsModel.removeNodesInError(nodeAddress);
                                     if (slackApi != null)
                                         slackApi.call(new SlackMessage("Fixed: " + nodeAddress.getFullAddress(),
                                                 "<" + seedNodesRepository.getOperator(nodeAddress) + ">" + " Your seed node is recovered."));

@@ -162,6 +162,8 @@ public class MonitorRequestManager implements ConnectionListener {
                             @Override
                             public void onFault(String errorMessage, NodeAddress nodeAddress) {
                                 handlerMap.remove(nodeAddress);
+                                stopRetryTimer(nodeAddress);
+
                                 int retryCounter;
                                 if (retryCounterMap.containsKey(nodeAddress))
                                     retryCounter = retryCounterMap.get(nodeAddress);
@@ -169,15 +171,25 @@ public class MonitorRequestManager implements ConnectionListener {
                                     retryCounter = 0;
 
                                 if (retryCounter < MAX_RETRIES) {
+                                    log.info("We got an error at peer={}. We will try again after a delay of {} sec. error={} ",
+                                            nodeAddress, RETRY_DELAY_SEC, errorMessage);
                                     final Timer timer = UserThread.runAfter(() -> requestFromNode(nodeAddress), RETRY_DELAY_SEC);
                                     retryTimerMap.put(nodeAddress, timer);
                                     retryCounterMap.put(nodeAddress, ++retryCounter);
                                 } else {
+                                    log.warn("We got repeated errors at peer={}. error={} ",
+                                            nodeAddress, errorMessage);
+
+                                    metricsModel.addNodesInError(nodeAddress);
+                                    metrics.getErrorMessages().add(errorMessage + " (" + new Date().toString() + ")");
+
                                     metricsModel.updateReport();
                                     completedRequestIndex++;
                                     if (completedRequestIndex == numNodes)
                                         metricsModel.log();
-                                    nodesInError.add(nodeAddress);
 
                                     retryCounterMap.remove(nodeAddress);
 
                                     if (slackApi != null)
                                         slackApi.call(new SlackMessage("Error: " + nodeAddress.getFullAddress(),
                                                 "<" + seedNodesRepository.getOperator(nodeAddress) + ">" + " Your seed node failed " + RETRY_DELAY_SEC + " times with error message: " + errorMessage));
@@ -20,9 +20,11 @@
         <dependency>
             <groupId>com.github.JesusMcCloud.netlayer</groupId>
             <artifactId>tor.native</artifactId>
-            <!-- older version e7195748 -->
-            <!-- newer version 1c9d80e -> deactivate as there might be some issues TODO check with dev -->
-            <version>e7195748</version>
+
+            <!-- v0.3.2: first integration e7195748 -->
+            <!-- v0.3.3: fixed shutdown exception 1c9d80e -->
+            <!-- v0.3.4: use latest tor version b3497f1d -->
+            <version>b3497f1d</version>
             <exclusions>
                 <exclusion>
                     <groupId>org.slf4j</groupId>
@@ -359,7 +359,7 @@ public class Connection implements MessageListener {
     ///////////////////////////////////////////////////////////////////////////////////////////
 
     public void setPeerType(PeerType peerType) {
-        Log.traceCall(peerType.toString());
+        log.info("setPeerType: peerType={}, nodeAddressOpt={}", peerType.toString(), peersNodeAddressOptional);
         this.peerType = peerType;
     }

@@ -431,7 +431,7 @@ public class Connection implements MessageListener {
     }
 
     public void shutDown(CloseConnectionReason closeConnectionReason, @Nullable Runnable shutDownCompleteHandler) {
-        Log.traceCall(this.toString());
+        log.info("shutDown: nodeAddressOpt={}, closeConnectionReason={}", this.peersNodeAddressOptional, closeConnectionReason);
         if (!stopped) {
             String peersNodeAddress = peersNodeAddressOptional.isPresent() ? peersNodeAddressOptional.get().toString() : "null";
             log.debug("\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n" +

@@ -634,9 +634,11 @@ public class Connection implements MessageListener {
                     closeConnectionReason = CloseConnectionReason.SOCKET_CLOSED;
                 else
                     closeConnectionReason = CloseConnectionReason.RESET;
+
+                log.info("SocketException (expected if connection lost). closeConnectionReason={}; connection={}", closeConnectionReason, connection);
             } else if (e instanceof SocketTimeoutException || e instanceof TimeoutException) {
                 closeConnectionReason = CloseConnectionReason.SOCKET_TIMEOUT;
-                log.debug("Shut down caused by exception {} on connection={}", e.toString(), connection);
+                log.info("Shut down caused by exception {} on connection={}", e.toString(), connection);
             } else if (e instanceof EOFException) {
                 closeConnectionReason = CloseConnectionReason.TERMINATED;
                 log.warn("Shut down caused by exception {} on connection={}", e.toString(), connection);

@@ -646,9 +648,9 @@ public class Connection implements MessageListener {
             } else {
                 // TODO sometimes we get StreamCorruptedException, OptionalDataException, IllegalStateException
                 closeConnectionReason = CloseConnectionReason.UNKNOWN_EXCEPTION;
-                log.warn("Unknown reason for exception at socket {}\n\t" +
+                log.warn("Unknown reason for exception at socket: {}\n\t" +
                         "connection={}\n\t" +
-                        "Exception=",
+                        "Exception={}",
                         socket.toString(),
                         this,
                         e.toString());

@@ -744,6 +746,7 @@ public class Connection implements MessageListener {
                 try {
                     if (sharedModel.getSocket() != null &&
                             sharedModel.getSocket().isClosed()) {
+                        log.warn("Socket is null or closed socket={}", sharedModel.getSocket());
                         stopAndShutDown(CloseConnectionReason.SOCKET_CLOSED);
                         return;
                     }

@@ -765,8 +768,10 @@ public class Connection implements MessageListener {
                     PB.NetworkEnvelope proto = PB.NetworkEnvelope.parseDelimitedFrom(protoInputStream);
 
                     if (proto == null) {
-                        if (protoInputStream.read() != -1)
-                            log.error("proto is null. Should not happen...");
+                        if (protoInputStream.read() == -1)
+                            log.info("proto is null because protoInputStream.read()=-1 (EOF). That is expected if client got stopped without proper shutdown.");
+                        else
+                            log.warn("proto is null. protoInputStream.read()=" + protoInputStream.read());
                         stopAndShutDown(CloseConnectionReason.NO_PROTO_BUFFER_ENV);
                         return;
                     }

@@ -848,7 +853,7 @@ public class Connection implements MessageListener {
 
                         if (networkEnvelope instanceof CloseConnectionMessage) {
                             // If we get a CloseConnectionMessage we shut down
-                            log.debug("CloseConnectionMessage received. Reason={}\n\t" +
+                            log.info("CloseConnectionMessage received. Reason={}\n\t" +
                                     "connection={}", proto.getCloseConnectionMessage().getReason(), connection);
                             if (CloseConnectionReason.PEER_BANNED.name().equals(proto.getCloseConnectionMessage().getReason())) {
                                 log.warn("We got shut down because we are banned by the other peer. (InputHandler.run CloseConnectionMessage)");

@@ -880,10 +885,6 @@ public class Connection implements MessageListener {
                         // 4. DirectMessage (implements SendersNodeAddressMessage)
                         if (networkEnvelope instanceof SendersNodeAddressMessage) {
                             NodeAddress senderNodeAddress = ((SendersNodeAddressMessage) networkEnvelope).getSenderNodeAddress();
-                            // We must not shut down a banned peer at that moment as it would trigger a connection termination
-                            // and we could not send the CloseConnectionMessage.
-                            // We shut down a banned peer at the next step at setPeersNodeAddress().
-
                             Optional<NodeAddress> peersNodeAddressOptional = connection.getPeersNodeAddressOptional();
                             if (peersNodeAddressOptional.isPresent()) {
                                 // If we have already the peers address we check again if it matches our stored one

@@ -891,6 +892,9 @@ public class Connection implements MessageListener {
                                         "senderNodeAddress not matching connections peer address.\n\t" +
                                                 "message=" + networkEnvelope);
                             } else {
+                                // We must not shut down a banned peer at that moment as it would trigger a connection termination
+                                // and we could not send the CloseConnectionMessage.
+                                // We check for a banned peer inside setPeersNodeAddress() and shut down if banned.
                                 connection.setPeersNodeAddress(senderNodeAddress);
                             }
                         }
@@ -29,7 +29,6 @@ import java.net.Socket;
 import java.nio.file.Paths;
 import java.util.Date;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 

@@ -83,7 +82,10 @@ public class TorNetworkNode extends NetworkNode {
     @Override
     protected Socket createSocket(NodeAddress peerNodeAddress) throws IOException {
         checkArgument(peerNodeAddress.getHostName().endsWith(".onion"), "PeerAddress is not an onion address");
-        return new TorSocket(peerNodeAddress.getHostName(), peerNodeAddress.getPort(), UUID.randomUUID().toString()); // each socket uses a random Tor stream id
+        // If streamId is null stream isolation gets deactivated.
+        // Hidden services use stream isolation by default so we pass null.
+        return new TorSocket(peerNodeAddress.getHostName(), peerNodeAddress.getPort(), null);
+        // return new TorSocket(peerNodeAddress.getHostName(), peerNodeAddress.getPort(), UUID.randomUUID().toString()); // each socket uses a random Tor stream id
     }
 
     // TODO handle failure more cleanly

@@ -210,7 +212,10 @@ public class TorNetworkNode extends NetworkNode {
                 long ts1 = new Date().getTime();
                 log.info("Starting tor");
                 Tor.setDefault(new NativeTor(torDir, bridgeEntries));
-                log.info("Tor started after {} ms. Start publishing hidden service.", (new Date().getTime() - ts1)); // takes usually a few seconds
+                log.info("\n################################################################\n" +
+                                "Tor started after {} ms. Start publishing hidden service.\n" +
+                                "################################################################",
+                        (new Date().getTime() - ts1)); // takes usually a few seconds
 
                 UserThread.execute(() -> setupListeners.stream().forEach(SetupListener::onTorNodeReady));

@@ -218,7 +223,10 @@ public class TorNetworkNode extends NetworkNode {
                 hiddenServiceSocket = new HiddenServiceSocket(localPort, "", servicePort);
                 hiddenServiceSocket.addReadyListener(socket -> {
                     try {
-                        log.info("Tor hidden service published after {} ms. Socked={}", (new Date().getTime() - ts2), socket); //takes usually 30-40 sec
+                        log.info("\n################################################################\n" +
+                                        "Tor hidden service published after {} ms. Socked={}\n" +
+                                        "################################################################",
+                                (new Date().getTime() - ts2), socket); //takes usually 30-40 sec
                         new Thread() {
                             @Override
                             public void run() {
@@ -38,8 +38,12 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
 
     private static final int MAX_REPORTED_PEERS = 1000;
     private static final int MAX_PERSISTED_PEERS = 500;
-    private static final long MAX_AGE = TimeUnit.DAYS.toMillis(14); // max age for reported peers is 14 days
+    // max age for reported peers is 14 days
+    private static final long MAX_AGE = TimeUnit.DAYS.toMillis(14);
+    // Age of what we consider connected peers still as live peers
+    private static final long MAX_AGE_LIVE_PEERS = TimeUnit.MINUTES.toMillis(30);
     private static final boolean PRINT_REPORTED_PEERS_DETAILS = true;
+    private Set<Peer> latestLivePeers = new HashSet<>();
 
 
     ///////////////////////////////////////////////////////////////////////////////////////////
@@ -159,12 +163,12 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
     // Modify this to change the relationships between connection limits.
     // maxConnections default 12
     private void setConnectionLimits(int maxConnections) {
-        this.maxConnections = maxConnections; // 12
-        disconnectFromSeedNode = Math.min(6, maxConnections + 1); // 6
-        minConnections = Math.max(1, maxConnections - 4); // 1-8
-        maxConnectionsPeer = maxConnections + 4; // 16
-        maxConnectionsNonDirect = maxConnections + 8; // 20
-        maxConnectionsAbsolute = maxConnections + 18; // 30 -> seedNode with maxConnections=30 -> 48
+        this.maxConnections = maxConnections; // app node 12; seedNode 30
+        disconnectFromSeedNode = Math.min(6, maxConnections + 1); // 6
+        minConnections = Math.max(1, (int) Math.round(maxConnections * 0.7)); // app node 1-8; seedNode 21
+        maxConnectionsPeer = Math.max(4, (int) Math.round(maxConnections * 1.3)); // app node 16; seedNode 39
+        maxConnectionsNonDirect = Math.max(8, (int) Math.round(maxConnections * 1.7)); // app node 20; seedNode 51
+        maxConnectionsAbsolute = Math.max(12, (int) Math.round(maxConnections * 2.5)); // app node 30; seedNode 66
     }
 
     ///////////////////////////////////////////////////////////////////////////////////////////
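The rewritten limits scale multiplicatively with maxConnections instead of using fixed offsets, so seed nodes (which run with a higher base) get proportionally wider headroom. As a quick check (standalone sketch, not part of the commit), the formulas reproduce the inline figures for the app-node default of 12:

    public class ConnectionLimitsSketch {
        public static void main(String[] args) {
            int maxConnections = 12; // app node default, per the comments in the diff
            int minConnections = Math.max(1, (int) Math.round(maxConnections * 0.7));
            int maxConnectionsPeer = Math.max(4, (int) Math.round(maxConnections * 1.3));
            int maxConnectionsNonDirect = Math.max(8, (int) Math.round(maxConnections * 1.7));
            int maxConnectionsAbsolute = Math.max(12, (int) Math.round(maxConnections * 2.5));
            // Prints 8 16 20 30, matching the "app node" comments above
            System.out.printf("%d %d %d %d%n", minConnections, maxConnectionsPeer,
                    maxConnectionsNonDirect, maxConnectionsAbsolute);
        }
    }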
@@ -198,6 +202,7 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
 
     @Override
     public void onDisconnect(CloseConnectionReason closeConnectionReason, Connection connection) {
+        log.info("onDisconnect called: nodeAddress={}, closeConnectionReason={}", connection.getPeersNodeAddressOptional(), closeConnectionReason);
         Log.logIfStressTests("onDisconnect of peer " +
                 (connection.getPeersNodeAddressOptional().isPresent() ? connection.getPeersNodeAddressOptional().get() : "PeersNode unknown") +
                 " / No. of connections: " + networkNode.getAllConnections().size() +

@@ -214,6 +219,9 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
         lostAllConnections = networkNode.getAllConnections().isEmpty();
         if (lostAllConnections) {
             stopped = true;
+            log.warn("\n------------------------------------------------------------\n" +
+                    "All connections lost\n" +
+                    "------------------------------------------------------------");
             listeners.stream().forEach(Listener::onAllConnectionsLost);
         }

@@ -261,10 +269,10 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
         Log.traceCall("maxConnections=" + maxConnections);
         Set<Connection> allConnections = networkNode.getAllConnections();
         int size = allConnections.size();
-        log.debug("We have {} connections open. Our limit is {}", size, maxConnections);
+        log.info("We have {} connections open. Our limit is {}", size, maxConnections);
 
         if (size > maxConnections) {
-            log.debug("We have too many connections open.\n\t" +
+            log.info("We have too many connections open.\n\t" +
                     "Lets try first to remove the inbound connections of type PEER.");
             List<Connection> candidates = allConnections.stream()
                     .filter(e -> e instanceof InboundConnection)

@@ -275,7 +283,7 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
                 log.debug("No candidates found. We check if we exceed our " +
                         "maxConnectionsPeer limit of {}", maxConnectionsPeer);
                 if (size > maxConnectionsPeer) {
-                    log.debug("Lets try to remove ANY connection of type PEER.");
+                    log.info("Lets try to remove ANY connection of type PEER.");
                     candidates = allConnections.stream()
                             .filter(e -> e.getPeerType() == Connection.PeerType.PEER)
                             .collect(Collectors.toList());

@@ -284,7 +292,7 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
                     log.debug("No candidates found. We check if we exceed our " +
                             "maxConnectionsNonDirect limit of {}", maxConnectionsNonDirect);
                     if (size > maxConnectionsNonDirect) {
-                        log.debug("Lets try to remove any connection which is not of type DIRECT_MSG_PEER or INITIAL_DATA_REQUEST.");
+                        log.info("Lets try to remove any connection which is not of type DIRECT_MSG_PEER or INITIAL_DATA_REQUEST.");
                         candidates = allConnections.stream()
                                 .filter(e -> e.getPeerType() != Connection.PeerType.DIRECT_MSG_PEER && e.getPeerType() != Connection.PeerType.INITIAL_DATA_REQUEST)
                                 .collect(Collectors.toList());

@@ -293,7 +301,7 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
                         log.debug("No candidates found. We check if we exceed our " +
                                 "maxConnectionsAbsolute limit of {}", maxConnectionsAbsolute);
                         if (size > maxConnectionsAbsolute) {
-                            log.debug("Lets try to remove any connection.");
+                            log.info("We reached abs. max. connections. Lets try to remove ANY connection.");
                            candidates = allConnections.stream().collect(Collectors.toList());
                        }
                    }

@@ -304,8 +312,8 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
 
         if (!candidates.isEmpty()) {
             candidates.sort((o1, o2) -> ((Long) o1.getStatistic().getLastActivityTimestamp()).compareTo(((Long) o2.getStatistic().getLastActivityTimestamp())));
-            log.debug("Candidates.size() for shut down=" + candidates.size());
             Connection connection = candidates.remove(0);
+            log.info("checkMaxConnections: Num candidates for shut down={}. We close oldest connection: {}", candidates.size(), connection);
-            log.debug("We are going to shut down the oldest connection.\n\tconnection=" + connection.toString());
             if (!connection.isStopped())
                 connection.shutDown(CloseConnectionReason.TOO_MANY_CONNECTIONS_OPEN, () -> UserThread.runAfter(this::checkMaxConnections, 100, TimeUnit.MILLISECONDS));

@@ -395,7 +403,7 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
         return reportedPeers;
     }
 
-    public void addToReportedPeers(HashSet<Peer> reportedPeersToAdd, Connection connection) {
+    public void addToReportedPeers(Set<Peer> reportedPeersToAdd, Connection connection) {
         printNewReportedPeers(reportedPeersToAdd);
 
         // We check if the reported msg is not violating our rules

@@ -419,11 +427,10 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
     private void purgeReportedPeersIfExceeds() {
         Log.traceCall();
         int size = reportedPeers.size();
-        int limit = MAX_REPORTED_PEERS - maxConnectionsAbsolute;
-        if (size > limit) {
-            log.trace("We have already {} reported peers which exceeds our limit of {}." +
-                    "We remove random peers from the reported peers list.", size, limit);
-            int diff = size - limit;
+        if (size > MAX_REPORTED_PEERS) {
+            log.info("We have already {} reported peers which exceeds our limit of {}." +
+                    "We remove random peers from the reported peers list.", size, MAX_REPORTED_PEERS);
+            int diff = size - MAX_REPORTED_PEERS;
             List<Peer> list = new ArrayList<>(reportedPeers);
             // we dont use sorting by lastActivityDate to keep it more random
             for (int i = 0; i < diff; i++) {

@@ -452,7 +459,7 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
         }
     }
 
-    private void printNewReportedPeers(HashSet<Peer> reportedPeers) {
+    private void printNewReportedPeers(Set<Peer> reportedPeers) {
         //noinspection ConstantConditions
         if (PRINT_REPORTED_PEERS_DETAILS) {
             StringBuilder result = new StringBuilder("We received new reportedPeers:");

@@ -570,6 +577,7 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
 
     public void handleConnectionFault(NodeAddress nodeAddress, @Nullable Connection connection) {
         Log.traceCall("nodeAddress=" + nodeAddress);
+        log.debug("handleConnectionFault called: nodeAddress=" + nodeAddress);
         boolean doRemovePersistedPeer = false;
         removeReportedPeer(nodeAddress);
         Optional<Peer> persistedPeerOptional = getPersistedPeerOptional(nodeAddress);
@@ -600,10 +608,20 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
                 .ifPresent(connection -> connection.shutDown(closeConnectionReason));
     }
 
-    public HashSet<Peer> getConnectedNonSeedNodeReportedPeers(NodeAddress excludedNodeAddress) {
-        return new HashSet<>(getConnectedNonSeedNodeReportedPeers().stream()
+    // Delivers the live peers from the last 30 min (MAX_AGE_LIVE_PEERS)
+    // We include older peers to avoid risks for network partitioning
+    public Set<Peer> getLivePeers(NodeAddress excludedNodeAddress) {
+        Set<Peer> currentLivePeers = new HashSet<>(getConnectedReportedPeers().stream()
+                .filter(e -> !isSeedNode(e))
                 .filter(e -> !e.getNodeAddress().equals(excludedNodeAddress))
                 .collect(Collectors.toSet()));
+        latestLivePeers.addAll(currentLivePeers);
+        long maxAge = new Date().getTime() - MAX_AGE_LIVE_PEERS;
+        latestLivePeers = latestLivePeers.stream()
+                .filter(peer -> peer.getDate().getTime() > maxAge)
+                .collect(Collectors.toSet());
+        log.info("Num of latestLivePeers={}, latestLivePeers={}", latestLivePeers.size(), latestLivePeers);
+        return latestLivePeers;
     }
 
     ///////////////////////////////////////////////////////////////////////////////////////////
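getLivePeers keeps a rolling union of the currently connected, non-seed reported peers and evicts entries older than MAX_AGE_LIVE_PEERS (30 min), deliberately trading a little staleness for resilience against network partitioning. The core pattern, reduced to a standalone sketch with a simplified stand-in for Bisq's Peer class:

    import java.util.Date;
    import java.util.HashSet;
    import java.util.Set;
    import java.util.stream.Collectors;

    public class LivePeersSketch {
        // Simplified stand-in for Bisq's Peer (address plus last-seen date)
        static class Peer {
            final String address;
            final Date date;
            Peer(String address, Date date) { this.address = address; this.date = date; }
        }

        private static final long MAX_AGE_LIVE_PEERS = 30 * 60 * 1000L; // 30 min, as in the diff

        private Set<Peer> latestLivePeers = new HashSet<>();

        Set<Peer> getLivePeers(Set<Peer> currentLivePeers) {
            latestLivePeers.addAll(currentLivePeers); // merge in what is connected right now
            long maxAge = new Date().getTime() - MAX_AGE_LIVE_PEERS;
            latestLivePeers = latestLivePeers.stream() // evict peers not seen within the window
                    .filter(peer -> peer.date.getTime() > maxAge)
                    .collect(Collectors.toSet());
            return latestLivePeers;
        }
    }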
@@ -618,12 +636,6 @@ public class PeerManager implements ConnectionListener, PersistedDataHost {
                 .collect(Collectors.toSet());
     }
 
-    private HashSet<Peer> getConnectedNonSeedNodeReportedPeers() {
-        return new HashSet<>(getConnectedReportedPeers().stream()
-                .filter(e -> !isSeedNode(e))
-                .collect(Collectors.toSet()));
-    }
-
     private void stopCheckMaxConnectionsTimer() {
         if (checkMaxConnectionsTimer != null) {
             checkMaxConnectionsTimer.stop();
@@ -80,7 +80,7 @@ public class GetDataRequestHandler {
 
         if (timeoutTimer == null) {
             timeoutTimer = UserThread.runAfter(() -> { // setup before sending to avoid race conditions
-                String errorMessage = "A timeout occurred for getDataResponse:" + getDataResponse +
+                String errorMessage = "A timeout occurred for getDataResponse " +
                         " on connection:" + connection;
                 handleFault(errorMessage, CloseConnectionReason.SEND_MSG_TIMEOUT, connection);
             },

@@ -281,7 +281,7 @@ class RequestDataHandler implements MessageListener {
                 log.warn("We have stopped already. We ignore that onDataRequest call.");
             }
         } else {
-            log.warn("We got a message from another connection and ignore it. That should never happen.");
+            log.debug("We got the message from another connection and ignore it on that handler. That is expected if we have several requests open.");
         }
     }
 }
@@ -1,9 +1,11 @@
 package io.bisq.network.p2p.peers.getdata;
 
+import com.google.inject.name.Named;
 import io.bisq.common.Timer;
 import io.bisq.common.UserThread;
 import io.bisq.common.app.Log;
 import io.bisq.common.proto.network.NetworkEnvelope;
+import io.bisq.network.NetworkOptionKeys;
 import io.bisq.network.p2p.NodeAddress;
 import io.bisq.network.p2p.network.*;
 import io.bisq.network.p2p.peers.PeerManager;

@@ -16,6 +18,7 @@ import org.jetbrains.annotations.Nullable;
 
 import javax.inject.Inject;
 import java.util.*;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
 import static com.google.common.base.Preconditions.checkArgument;

@@ -24,6 +27,10 @@ import static com.google.common.base.Preconditions.checkArgument;
 public class RequestDataManager implements MessageListener, ConnectionListener, PeerManager.Listener {
     private static final long RETRY_DELAY_SEC = 10;
     private static final long CLEANUP_TIMER = 120;
+    // How many seeds we request the PreliminaryGetDataRequest from
+    private static int NUM_SEEDS_FOR_PRELIMINARY_REQUEST = 2;
+    // how many seeds additional to the first responding PreliminaryGetDataRequest seed we request the GetUpdatedDataRequest from
+    private static int NUM_ADDITIONAL_SEEDS_FOR_UPDATE_REQUEST = 1;
     private boolean isPreliminaryDataRequest = true;
 
     ///////////////////////////////////////////////////////////////////////////////////////////

@@ -69,7 +76,8 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
     public RequestDataManager(NetworkNode networkNode,
                               SeedNodesRepository seedNodesRepository,
                               P2PDataStorage dataStorage,
-                              PeerManager peerManager) {
+                              PeerManager peerManager,
+                              @javax.annotation.Nullable @Named(NetworkOptionKeys.MY_ADDRESS) String myAddress) {
         this.networkNode = networkNode;
         this.dataStorage = dataStorage;
         this.peerManager = peerManager;

@@ -79,6 +87,15 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
         this.peerManager.addListener(this);
 
         this.seedNodeAddresses = new HashSet<>(seedNodesRepository.getSeedNodeAddresses());
+
+        // If we are a seed node we use more redundancy at startup to be sure we get all data.
+        // We cannot use networkNode.getNodeAddress() as nodeAddress as that is null at this point, so we use
+        // new NodeAddress(myAddress) for checking if we are a seed node.
+        // seedNodeAddresses do not contain my own address as that gets filtered out
+        if (myAddress != null && !myAddress.isEmpty() && seedNodesRepository.isSeedNode(new NodeAddress(myAddress))) {
+            NUM_SEEDS_FOR_PRELIMINARY_REQUEST = 3;
+            NUM_ADDITIONAL_SEEDS_FOR_UPDATE_REQUEST = 2;
+        }
     }
 
     public void shutDown() {
@@ -105,10 +122,15 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
         ArrayList<NodeAddress> nodeAddresses = new ArrayList<>(seedNodeAddresses);
         if (!nodeAddresses.isEmpty()) {
             Collections.shuffle(nodeAddresses);
-            NodeAddress nextCandidate = nodeAddresses.get(0);
-            nodeAddresses.remove(nextCandidate);
+            ArrayList<NodeAddress> finalNodeAddresses = new ArrayList<>(nodeAddresses);
+            final int size = Math.min(NUM_SEEDS_FOR_PRELIMINARY_REQUEST, finalNodeAddresses.size());
+            for (int i = 0; i < size; i++) {
+                NodeAddress nodeAddress = finalNodeAddresses.get(i);
+                nodeAddresses.remove(nodeAddress);
+                UserThread.runAfter(() -> requestData(nodeAddress, nodeAddresses), (i * 200 + 1), TimeUnit.MILLISECONDS);
+            }
+
             isPreliminaryDataRequest = true;
-            requestData(nextCandidate, nodeAddresses);
             return true;
         } else {
             return false;
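The loop above replaces a single seed request with up to NUM_SEEDS_FOR_PRELIMINARY_REQUEST parallel ones, staggered by roughly 200 ms each so they do not all hit the network at once. The scheduling idea, as a standalone sketch that uses a plain ScheduledExecutorService in place of Bisq's UserThread:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class StaggeredRequestsSketch {
        public static void main(String[] args) throws InterruptedException {
            List<String> seeds = new ArrayList<>(Arrays.asList("seed1", "seed2", "seed3", "seed4"));
            Collections.shuffle(seeds);
            int numSeedsForPreliminaryRequest = 2; // 3 when running as a seed node, per the diff
            ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
            int size = Math.min(numSeedsForPreliminaryRequest, seeds.size());
            for (int i = 0; i < size; i++) {
                String seed = seeds.get(i);
                // i * 200 + 1 ms: the first request fires almost immediately, the rest trail in 200 ms steps
                scheduler.schedule(() -> System.out.println("requestData from " + seed),
                        i * 200 + 1, TimeUnit.MILLISECONDS);
            }
            scheduler.shutdown();
            scheduler.awaitTermination(2, TimeUnit.SECONDS);
        }
    }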
@@ -119,13 +141,28 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
         Log.traceCall();
         checkArgument(nodeAddressOfPreliminaryDataRequest.isPresent(), "nodeAddressOfPreliminaryDataRequest must be present");
         dataUpdateRequested = true;
-        List<NodeAddress> remainingNodeAddresses = new ArrayList<>(seedNodeAddresses);
-        if (!remainingNodeAddresses.isEmpty()) {
-            Collections.shuffle(remainingNodeAddresses);
+        isPreliminaryDataRequest = false;
+        List<NodeAddress> nodeAddresses = new ArrayList<>(seedNodeAddresses);
+        if (!nodeAddresses.isEmpty()) {
             // We use the node we have already connected to to request again
             NodeAddress candidate = nodeAddressOfPreliminaryDataRequest.get();
-            remainingNodeAddresses.remove(candidate);
-            isPreliminaryDataRequest = false;
-            requestData(candidate, remainingNodeAddresses);
+            nodeAddresses.remove(candidate);
+            requestData(candidate, nodeAddresses);
+
+            // For more redundancy we request as well from other random nodes.
+            Collections.shuffle(nodeAddresses);
+            ArrayList<NodeAddress> finalNodeAddresses = new ArrayList<>(nodeAddresses);
+            int numRequests = 0;
+            for (int i = 0; i < finalNodeAddresses.size() && numRequests < NUM_ADDITIONAL_SEEDS_FOR_UPDATE_REQUEST; i++) {
+                NodeAddress nodeAddress = finalNodeAddresses.get(i);
+                nodeAddresses.remove(nodeAddress);
+
+                // It might be that we have a prelim. request open for the same seed, if so we skip to the next.
+                if (!handlerMap.containsKey(nodeAddress)) {
+                    UserThread.runAfter(() -> requestData(nodeAddress, nodeAddresses), (i * 200 + 1), TimeUnit.MILLISECONDS);
+                    numRequests++;
+                }
+            }
         }
     }

@@ -267,7 +304,10 @@ public class RequestDataManager implements MessageListener, ConnectionListener,
         // 1. We get a response from requestPreliminaryData
         if (!nodeAddressOfPreliminaryDataRequest.isPresent()) {
             nodeAddressOfPreliminaryDataRequest = Optional.of(nodeAddress);
-            listener.onPreliminaryDataReceived();
+            // We delay because it can be that we get the HS published before we receive the
+            // preliminary data and the onPreliminaryDataReceived call triggers the
+            // dataUpdateRequested set to true, so we would also call the onUpdatedDataReceived.
+            UserThread.runAfter(listener::onPreliminaryDataReceived, 100, TimeUnit.MILLISECONDS);
         }
 
         // 2. Later we get a response from requestUpdatesData
@@ -68,7 +68,7 @@ class GetPeersRequestHandler {
         checkArgument(connection.getPeersNodeAddressOptional().isPresent(),
                 "The peers address must have been already set at the moment");
         GetPeersResponse getPeersResponse = new GetPeersResponse(getPeersRequest.getNonce(),
-                peerManager.getConnectedNonSeedNodeReportedPeers(connection.getPeersNodeAddressOptional().get()));
+                peerManager.getLivePeers(connection.getPeersNodeAddressOptional().get()));
 
         checkArgument(timeoutTimer == null, "onGetPeersRequest must not be called twice.");
         timeoutTimer = UserThread.runAfter(() -> { // setup before sending to avoid race conditions

@@ -84,7 +84,7 @@ class PeerExchangeHandler implements MessageListener {
         log.info("sendGetPeersRequest to nodeAddress={}", nodeAddress);
         if (!stopped) {
             if (networkNode.getNodeAddress() != null) {
-                GetPeersRequest getPeersRequest = new GetPeersRequest(networkNode.getNodeAddress(), nonce, peerManager.getConnectedNonSeedNodeReportedPeers(nodeAddress));
+                GetPeersRequest getPeersRequest = new GetPeersRequest(networkNode.getNodeAddress(), nonce, peerManager.getLivePeers(nodeAddress));
                 if (timeoutTimer == null) {
                     timeoutTimer = UserThread.runAfter(() -> { // setup before sending to avoid race conditions
                         if (!stopped) {

@@ -104,7 +104,7 @@ public class PeerExchangeManager implements MessageListener, ConnectionListener,
 
     @Override
     public void onDisconnect(CloseConnectionReason closeConnectionReason, Connection connection) {
-        Log.traceCall();
+        log.info("onDisconnect closeConnectionReason={}, nodeAddressOpt={}", closeConnectionReason, connection.getPeersNodeAddressOptional());
         closeHandler(connection);
 
         if (retryTimer == null) {

@@ -196,7 +196,7 @@ public class PeerExchangeManager implements MessageListener, ConnectionListener,
     ///////////////////////////////////////////////////////////////////////////////////////////
 
     private void requestReportedPeers(NodeAddress nodeAddress, List<NodeAddress> remainingNodeAddresses) {
-        Log.traceCall("nodeAddress=" + nodeAddress);
+        log.info("requestReportedPeers nodeAddress={}; remainingNodeAddresses.size={}", nodeAddress, remainingNodeAddresses.size());
         if (!stopped) {
             if (!handlerMap.containsKey(nodeAddress)) {
                 PeerExchangeHandler peerExchangeHandler = new PeerExchangeHandler(networkNode,
@@ -15,6 +15,7 @@ import javax.annotation.Nullable;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 import static com.google.common.base.Preconditions.checkNotNull;

@@ -24,11 +25,11 @@ import static com.google.common.base.Preconditions.checkNotNull;
 public final class GetPeersRequest extends NetworkEnvelope implements PeerExchangeMessage, SendersNodeAddressMessage, SupportedCapabilitiesMessage {
     private final NodeAddress senderNodeAddress;
     private final int nonce;
-    private final HashSet<Peer> reportedPeers;
+    private final Set<Peer> reportedPeers;
     @Nullable
     private final List<Integer> supportedCapabilities;
 
-    public GetPeersRequest(NodeAddress senderNodeAddress, int nonce, HashSet<Peer> reportedPeers) {
+    public GetPeersRequest(NodeAddress senderNodeAddress, int nonce, Set<Peer> reportedPeers) {
         this(senderNodeAddress, nonce, reportedPeers, Capabilities.getSupportedCapabilities(), Version.getP2PMessageVersion());
     }

@@ -39,7 +40,7 @@ public final class GetPeersRequest extends NetworkEnvelope implements PeerExchan
 
     private GetPeersRequest(NodeAddress senderNodeAddress,
                             int nonce,
-                            HashSet<Peer> reportedPeers,
+                            Set<Peer> reportedPeers,
                             @Nullable List<Integer> supportedCapabilities,
                             int messageVersion) {
         super(messageVersion);

@@ -14,17 +14,18 @@ import javax.annotation.Nullable;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 @EqualsAndHashCode(callSuper = true)
 @Value
 public final class GetPeersResponse extends NetworkEnvelope implements PeerExchangeMessage, SupportedCapabilitiesMessage {
     private final int requestNonce;
-    private final HashSet<Peer> reportedPeers;
+    private final Set<Peer> reportedPeers;
     @Nullable
     private final List<Integer> supportedCapabilities;
 
-    public GetPeersResponse(int requestNonce, HashSet<Peer> reportedPeers) {
+    public GetPeersResponse(int requestNonce, Set<Peer> reportedPeers) {
         this(requestNonce, reportedPeers, Capabilities.getSupportedCapabilities(), Version.getP2PMessageVersion());
     }

@@ -34,7 +35,7 @@ public final class GetPeersResponse extends NetworkEnvelope implements PeerExcha
     ///////////////////////////////////////////////////////////////////////////////////////////
 
     private GetPeersResponse(int requestNonce,
-                             HashSet<Peer> reportedPeers,
+                             Set<Peer> reportedPeers,
                              @Nullable List<Integer> supportedCapabilities,
                              int messageVersion) {
         super(messageVersion);
pom.xml (2 changed lines)
@@ -100,7 +100,7 @@
         <dependency>
             <groupId>com.github.bisq-network.libdohj</groupId>
             <artifactId>libdohj-core</artifactId>
-            <version>bba4088c</version>
+            <version>5a090784</version>
             <exclusions>
                 <exclusion>
                     <groupId>com.google.code.findbugs</groupId>
@@ -88,6 +88,11 @@
             <artifactId>maven-shade-plugin</artifactId>
             <version>3.1.0</version>
             <configuration>
+                <artifactSet>
+                    <excludes>
+                        <exclude>org.bouncycastle:*:*:*</exclude>
+                    </excludes>
+                </artifactSet>
                 <!-- broken with Java 8 (MSHADE-174), using ProGuard instead. -->
                 <minimizeJar>false</minimizeJar>
                 <transformers>

@@ -88,6 +88,11 @@
             <artifactId>maven-shade-plugin</artifactId>
             <version>3.1.0</version>
             <configuration>
+                <artifactSet>
+                    <excludes>
+                        <exclude>org.bouncycastle:*:*:*</exclude>
+                    </excludes>
+                </artifactSet>
                 <!-- broken with Java 8 (MSHADE-174), using ProGuard instead. -->
                 <minimizeJar>false</minimizeJar>
                 <transformers>
@@ -43,7 +43,7 @@ import static io.bisq.core.app.BisqEnvironment.DEFAULT_USER_DATA_DIR;
 @Slf4j
 public class SeedNodeMain extends BisqExecutable {
     private static final long MAX_MEMORY_MB_DEFAULT = 500;
-    private static final long CHECK_MEMORY_PERIOD_SEC = 5 * 60;
+    private static final long CHECK_MEMORY_PERIOD_SEC = 2 * 60;
     private SeedNode seedNode;
     private volatile boolean stopped;
     private static long maxMemory = MAX_MEMORY_MB_DEFAULT;

@@ -140,7 +140,7 @@ public class SeedNodeMain extends BisqExecutable {
                     Profiler.printSystemLoad(log);
                     if (!stopped) {
                         long usedMemoryInMB = Profiler.getUsedMemoryInMB();
-                        if (usedMemoryInMB > (maxMemory * 0.8)) {
+                        if (usedMemoryInMB > (maxMemory * 0.7)) {
                             log.warn("\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n" +
                                     "We are over our memory warn limit and call the GC. usedMemoryInMB: {}" +
                                     "\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n",
@@ -87,6 +87,11 @@
             <artifactId>maven-shade-plugin</artifactId>
             <version>3.1.0</version>
             <configuration>
+                <artifactSet>
+                    <excludes>
+                        <exclude>org.bouncycastle:*:*:*</exclude>
+                    </excludes>
+                </artifactSet>
                 <!-- broken with Java 8 (MSHADE-174), using ProGuard instead. -->
                 <minimizeJar>false</minimizeJar>
                 <transformers>