fix bug with map

Manfred Karrer 2015-11-18 16:43:28 +01:00
parent e87f99d2a5
commit b235beb56b
3 changed files with 10 additions and 3 deletions


@@ -145,9 +145,14 @@ public final class Arbitrator implements PubKeyProtectedExpirablePayload {
@Override
public String toString() {
return "Arbitrator{" +
"arbitratorAddress='" + arbitratorAddress + '\'' +
"arbitratorAddress=" + arbitratorAddress +
", languageCodes=" + languageCodes +
", btcAddress='" + btcAddress + '\'' +
", registrationDate=" + registrationDate +
", btcPubKey.hashCode()=" + Arrays.toString(btcPubKey).hashCode() +
", pubKeyRing.hashCode()=" + pubKeyRing.hashCode() +
", registrationSignature.hashCode()='" + registrationSignature.hashCode() + '\'' +
", registrationPubKey.hashCode()=" + Arrays.toString(registrationPubKey).hashCode() +
'}';
}
}
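The toString() change above stops printing the raw byte[] fields directly. Since byte[] does not override Object.toString(), printing it only shows its type and identity hash; wrapping it in Arrays.toString(...) and hashing the result gives a compact, content-derived fingerprint for log output. A minimal illustrative sketch (the class and variable names here are hypothetical, not project code):

import java.util.Arrays;

public class ByteArrayLoggingSketch {
    public static void main(String[] args) {
        byte[] pubKey = {0x01, 0x02, 0x03};

        // Default toString() shows only the type and identity hash, e.g. [B@6d06d69c
        System.out.println(pubKey);

        // Arrays.toString() shows the contents; hashing that string yields a short,
        // content-based fingerprint, as used in the Arbitrator.toString() above.
        System.out.println(Arrays.toString(pubKey));            // [1, 2, 3]
        System.out.println(Arrays.toString(pubKey).hashCode()); // stable int per content
    }
}

Arrays.hashCode(pubKey) would give a content hash without the intermediate string; the commit's Arrays.toString(...).hashCode() form serves the same logging purpose.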


@@ -30,7 +30,8 @@ import java.util.concurrent.*;
public class Connection implements MessageListener {
private static final Logger log = LoggerFactory.getLogger(Connection.class);
private static final int MAX_MSG_SIZE = 5 * 1024 * 1024; // 5 MB of compressed data
private static final int SOCKET_TIMEOUT = 30 * 60 * 1000; // 30 min.
//timeout on blocking Socket operations like ServerSocket.accept() or SocketInputStream.read()
private static final int SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 min.
private ConnectionPriority connectionPriority;
public static int getMaxMsgSize() {
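The new comment documents that SOCKET_TIMEOUT bounds blocking socket calls such as ServerSocket.accept() and reads on the socket's input stream, and the value is tightened from 30 minutes to 1 minute. A minimal sketch of how such a timeout behaves (an illustration assuming standard java.net usage, not the project's Connection class):

import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class SocketTimeoutSketch {
    // Same value as in the commit: 1 minute in milliseconds.
    private static final int SOCKET_TIMEOUT = 1 * 60 * 1000;

    public static void main(String[] args) throws IOException {
        try (ServerSocket serverSocket = new ServerSocket(0)) {
            // accept() now blocks for at most SOCKET_TIMEOUT before throwing.
            serverSocket.setSoTimeout(SOCKET_TIMEOUT);
            try {
                Socket client = serverSocket.accept();
                // Reads on an accepted socket can be bounded the same way.
                client.setSoTimeout(SOCKET_TIMEOUT);
                client.getInputStream().read();
            } catch (SocketTimeoutException e) {
                // With a 1 min timeout a stalled peer is detected far sooner
                // than with the previous 30 min value.
                System.out.println("Blocking call timed out: " + e.getMessage());
            }
        }
    }
}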


@@ -95,7 +95,8 @@ public class ProtectedExpirableDataStorage implements MessageListener {
// object when we get it sent from new peers, we dont remove the sequence number from the map.
// That way a add message for an already expired data will fail because the sequence number
// is equal and not larger.
map.entrySet().stream()
Map<ByteArray, ProtectedData> temp = new HashMap<>(map);
temp.entrySet().stream()
.filter(entry -> entry.getValue().isExpired())
.forEach(entry -> map.remove(entry.getKey()));
}
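The bug the commit title refers to: streaming over map.entrySet() and calling map.remove(...) inside the forEach is a structural modification of the HashMap during its own traversal, which can throw ConcurrentModificationException. Copying the entries into a temporary map first means the stream iterates the snapshot while only the original map is mutated. A self-contained sketch of the pattern (the Entry class and names here are hypothetical, used only to make the example runnable):

import java.util.HashMap;
import java.util.Map;

public class ExpiryCleanupSketch {

    // Hypothetical stand-in for the stored payload; only the expiry check matters here.
    static class Entry {
        private final long expiresAtMillis;

        Entry(long expiresAtMillis) {
            this.expiresAtMillis = expiresAtMillis;
        }

        boolean isExpired() {
            return System.currentTimeMillis() > expiresAtMillis;
        }
    }

    public static void main(String[] args) {
        Map<String, Entry> map = new HashMap<>();
        map.put("fresh", new Entry(System.currentTimeMillis() + 60_000));
        map.put("stale", new Entry(System.currentTimeMillis() - 60_000));

        // Buggy pattern: removing from the map while streaming its own entrySet()
        // can throw ConcurrentModificationException (the HashMap spliterator
        // checks modCount during traversal):
        //
        // map.entrySet().stream()
        //         .filter(e -> e.getValue().isExpired())
        //         .forEach(e -> map.remove(e.getKey()));

        // Fixed pattern from the commit: iterate a snapshot, mutate the original.
        Map<String, Entry> temp = new HashMap<>(map);
        temp.entrySet().stream()
                .filter(e -> e.getValue().isExpired())
                .forEach(e -> map.remove(e.getKey()));

        System.out.println(map.keySet()); // prints [fresh]
    }
}

A copy-free alternative would be map.values().removeIf(Entry::isExpired) or an explicit Iterator with iterator.remove(); the snapshot approach used in the commit keeps the existing stream style unchanged.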