Rate limit peer discovery

This changes PeerGroup to treat a peer discovery that doesn't return
enough results as a failure, so that it applies exponential backoff.
Otherwise it runs discovery constantly on a network with fewer than
the maximum number of peers available.

Also added a second sanity check to ensure discovery is never performed
more than once per second.
This commit is contained in:
Ross Nicoll 2015-12-25 15:17:02 +00:00 committed by Andreas Schildbach
parent c82aa3b070
commit cfb942a7fa

View file

@ -476,6 +476,7 @@ public class PeerGroup implements TransactionBroadcaster {
private Runnable triggerConnectionsJob = new Runnable() {
private boolean firstRun = true;
private final static long MIN_PEER_DISCOVERY_INTERVAL = 1000L;
@Override
public void run() {
@ -525,7 +526,9 @@ public class PeerGroup implements TransactionBroadcaster {
lock.lock();
try {
if (doDiscovery) {
if (discoverySuccess) {
// Require that we have enough connections, to consider this
// a success, or we just constantly test for new peers
if (discoverySuccess && countConnectedAndPendingPeers() >= getMaxConnections()) {
groupBackoff.trackSuccess();
} else {
groupBackoff.trackFailure();
@ -534,8 +537,10 @@ public class PeerGroup implements TransactionBroadcaster {
// Inactives is sorted by backoffMap time.
if (inactives.isEmpty()) {
if (countConnectedAndPendingPeers() < getMaxConnections()) {
log.info("Peer discovery didn't provide us any more peers, will try again later.");
executor.schedule(this, groupBackoff.getRetryTime() - now, TimeUnit.MILLISECONDS);
long interval = Math.max(groupBackoff.getRetryTime() - now, MIN_PEER_DISCOVERY_INTERVAL);
log.info("Peer discovery didn't provide us any more peers, will try again in "
+ interval + "ms.");
executor.schedule(this, interval, TimeUnit.MILLISECONDS);
} else {
// We have enough peers and discovery provided no more, so just settle down. Most likely we
// were given a fixed set of addresses in some test scenario.