* Peer Profiles:
- Reduce max age for display to 2h (was 3h)
- Drop unused Persist classes
- Dynamically adjust expire time to control memory use
This commit is contained in:
@ -1,29 +0,0 @@
|
|||||||
package net.i2p.router.peermanager;
|
|
||||||
|
|
||||||
import java.util.Iterator;
|
|
||||||
import java.util.Set;
|
|
||||||
|
|
||||||
import net.i2p.data.Hash;
|
|
||||||
import net.i2p.router.JobImpl;
|
|
||||||
import net.i2p.router.RouterContext;
|
|
||||||
|
|
||||||
class PersistProfileJob extends JobImpl {
|
|
||||||
private PersistProfilesJob _job;
|
|
||||||
private Iterator _peers;
|
|
||||||
public PersistProfileJob(RouterContext enclosingContext, PersistProfilesJob job, Set peers) {
|
|
||||||
super(enclosingContext);
|
|
||||||
_peers = peers.iterator();
|
|
||||||
_job = job;
|
|
||||||
}
|
|
||||||
public void runJob() {
|
|
||||||
if (_peers.hasNext())
|
|
||||||
_job.persist((Hash)_peers.next());
|
|
||||||
if (_peers.hasNext()) {
|
|
||||||
requeue(1000);
|
|
||||||
} else {
|
|
||||||
// no more left, requeue up the main persist-em-all job
|
|
||||||
_job.requeue();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
public String getName() { return "Persist profile"; }
|
|
||||||
}
|
|
@ -1,26 +0,0 @@
|
|||||||
package net.i2p.router.peermanager;
|
|
||||||
|
|
||||||
import java.util.Set;
|
|
||||||
|
|
||||||
import net.i2p.data.Hash;
|
|
||||||
import net.i2p.router.JobImpl;
|
|
||||||
import net.i2p.router.RouterContext;
|
|
||||||
|
|
||||||
class PersistProfilesJob extends JobImpl {
|
|
||||||
private PeerManager _mgr;
|
|
||||||
private final static long PERSIST_DELAY = 10*60*1000;
|
|
||||||
|
|
||||||
public PersistProfilesJob(RouterContext ctx, PeerManager mgr) {
|
|
||||||
super(ctx);
|
|
||||||
_mgr = mgr;
|
|
||||||
getTiming().setStartAfter(getContext().clock().now() + PERSIST_DELAY);
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getName() { return "Persist profiles"; }
|
|
||||||
public void runJob() {
|
|
||||||
Set peers = _mgr.selectPeers();
|
|
||||||
getContext().jobQueue().addJob(new PersistProfileJob(getContext(), this, peers));
|
|
||||||
}
|
|
||||||
void persist(Hash peer) { _mgr.storeProfile(peer); }
|
|
||||||
void requeue() { requeue(PERSIST_DELAY); }
|
|
||||||
}
|
|
@ -597,6 +597,12 @@ public class ProfileOrganizer {
|
|||||||
} finally { releaseReadLock(); }
|
} finally { releaseReadLock(); }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private static final long MIN_EXPIRE_TIME = 3*60*60*1000;
|
||||||
|
private static final long MAX_EXPIRE_TIME = 6*60*60*1000;
|
||||||
|
private static final long ADJUST_EXPIRE_TIME = 60*1000;
|
||||||
|
private static final int ENOUGH_PROFILES = 600;
|
||||||
|
private long _currentExpireTime = MAX_EXPIRE_TIME;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Place peers into the correct tier, as well as expand/contract and even drop profiles
|
* Place peers into the correct tier, as well as expand/contract and even drop profiles
|
||||||
* according to whatever limits are in place. Peer profiles are not coalesced during
|
* according to whatever limits are in place. Peer profiles are not coalesced during
|
||||||
@ -614,8 +620,13 @@ public class ProfileOrganizer {
|
|||||||
long uptime = _context.router().getUptime();
|
long uptime = _context.router().getUptime();
|
||||||
long expireOlderThan = -1;
|
long expireOlderThan = -1;
|
||||||
if (uptime > 60*60*1000) {
|
if (uptime > 60*60*1000) {
|
||||||
// drop profiles that we haven't spoken with in 6 hours
|
// dynamically adjust expire time to control memory usage
|
||||||
expireOlderThan = _context.clock().now() - 6*60*60*1000;
|
if (countNotFailingPeers() > ENOUGH_PROFILES)
|
||||||
|
_currentExpireTime = Math.max(_currentExpireTime - ADJUST_EXPIRE_TIME, MIN_EXPIRE_TIME);
|
||||||
|
else
|
||||||
|
_currentExpireTime = Math.min(_currentExpireTime + ADJUST_EXPIRE_TIME, MAX_EXPIRE_TIME);
|
||||||
|
// drop profiles that we haven't spoken to in a while
|
||||||
|
expireOlderThan = _context.clock().now() - _currentExpireTime;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!getWriteLock())
|
if (!getWriteLock())
|
||||||
|
@ -33,7 +33,7 @@ class ProfileOrganizerRenderer {
|
|||||||
Set peers = _organizer.selectAllPeers();
|
Set peers = _organizer.selectAllPeers();
|
||||||
|
|
||||||
long now = _context.clock().now();
|
long now = _context.clock().now();
|
||||||
long hideBefore = now - 3*60*60*1000;
|
long hideBefore = now - 2*60*60*1000;
|
||||||
|
|
||||||
TreeSet order = new TreeSet(_comparator);
|
TreeSet order = new TreeSet(_comparator);
|
||||||
TreeSet integratedPeers = new TreeSet(_comparator);
|
TreeSet integratedPeers = new TreeSet(_comparator);
|
||||||
@ -222,9 +222,6 @@ class ProfileOrganizerRenderer {
|
|||||||
buf.append("<li><b>integration</b>: how many new peers have they told us about lately?</li>");
|
buf.append("<li><b>integration</b>: how many new peers have they told us about lately?</li>");
|
||||||
buf.append("<li><b>failing?</b>: is the peer currently swamped (and if possible we should avoid nagging them)?</li>");
|
buf.append("<li><b>failing?</b>: is the peer currently swamped (and if possible we should avoid nagging them)?</li>");
|
||||||
buf.append("</ul></i>");
|
buf.append("</ul></i>");
|
||||||
buf.append("Red peers prefixed with '--' means the peer is failing, and blue peers prefixed ");
|
|
||||||
buf.append("with '++' means we've sent or received a message from them ");
|
|
||||||
buf.append("in the last five minutes.</i><br />");
|
|
||||||
buf.append("<p><b>Thresholds:</b><br />");
|
buf.append("<p><b>Thresholds:</b><br />");
|
||||||
buf.append("<b>Speed:</b> ").append(num(_organizer.getSpeedThreshold())).append(" (").append(fast).append(" fast peers)<br />");
|
buf.append("<b>Speed:</b> ").append(num(_organizer.getSpeedThreshold())).append(" (").append(fast).append(" fast peers)<br />");
|
||||||
buf.append("<b>Capacity:</b> ").append(num(_organizer.getCapacityThreshold())).append(" (").append(reliable).append(" high capacity peers)<br />");
|
buf.append("<b>Capacity:</b> ").append(num(_organizer.getCapacityThreshold())).append(" (").append(reliable).append(" high capacity peers)<br />");
|
||||||
|
Reference in New Issue
Block a user