* Floodfill: Add new FloodfillMonitorJob, which tracks active
floodfills, and automatically enables/disables floodfill on Class O routers to maintain 5-7 total active floodfills
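The job's automatic decision can still be overridden per router through the router.floodfillParticipant property read in FloodfillMonitorJob.shouldBeFloodfill(): "true" forces floodfill on, "false" forces it off, and any other value (the job's default, "auto") leaves the choice to the automatic logic, which is re-evaluated every 0.5-1.5 hours. A minimal sketch of the override, assuming the property is placed in the router's properties file (router.config is an assumption here, not part of this commit):

    # Hypothetical router.config excerpt: force floodfill on, skipping the
    # class O / uptime / reachability checks in FloodfillMonitorJob
    router.floodfillParticipant=true
    # ...or opt out entirely:
    #router.floodfillParticipant=false
    # Any other value (default "auto") lets the monitor job decide.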
history.txt
@@ -1,3 +1,15 @@
+2008-06-10 zzz
+    * Floodfill: Add new FloodfillMonitorJob, which tracks active
+      floodfills, and automatically enables/disables floodfill on
+      Class O routers to maintain 5-7 total active floodfills
+    * NetDb Stats:
+      - Remove several more stats
+      - Don't publish bw stats in first hour of uptime
+      - Publish floodfill stats even if other stats are disabled
+      - Changes not effective until 0.6.2.1 to provide cover.
+    * graphs.jsp: Fix a bug where it tries to display the combined
+      bandwidth graph when it isn't available
+
 2008-06-09 zzz
     * Propagate i2.i2p.i2p-0.6.2.1-pre branch to i2p.i2p
RouterVersion.java
@@ -17,7 +17,7 @@ import net.i2p.CoreVersion;
 public class RouterVersion {
     public final static String ID = "$Revision: 1.548 $ $Date: 2008-06-07 23:00:00 $";
     public final static String VERSION = "0.6.2";
-    public final static long BUILD = 1;
+    public final static long BUILD = 2;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
         System.out.println("Router ID: " + RouterVersion.ID);
FloodfillMonitorJob.java (new file)
@@ -0,0 +1,158 @@
+package net.i2p.router.networkdb.kademlia;
+
+import java.util.List;
+import java.util.Properties;
+
+import net.i2p.data.Hash;
+import net.i2p.data.RouterAddress;
+import net.i2p.data.RouterInfo;
+import net.i2p.util.Log;
+import net.i2p.router.JobImpl;
+import net.i2p.router.Router;
+import net.i2p.router.RouterContext;
+import net.i2p.router.peermanager.PeerProfile;
+
+/**
+ * Simple job to monitor the floodfill pool.
+ * If we are class O, and meet some other criteria,
+ * we will automatically become floodfill if there aren't enough.
+ * But only change ff status every few hours to minimize ff churn.
+ *
+ */
+class FloodfillMonitorJob extends JobImpl {
+    private Log _log;
+    private FloodfillNetworkDatabaseFacade _facade;
+    private long _lastChanged;
+
+    private static final int REQUEUE_DELAY = 60*60*1000;
+    private static final long MIN_UPTIME = 2*60*60*1000;
+    private static final long MIN_CHANGE_DELAY = 6*60*60*1000;
+    private static final int MIN_FF = 5;
+    private static final int MAX_FF = 7;
+    private static final String PROP_FLOODFILL_PARTICIPANT = "router.floodfillParticipant";
+
+    public FloodfillMonitorJob(RouterContext context, FloodfillNetworkDatabaseFacade facade) {
+        super(context);
+        _facade = facade;
+        _log = context.logManager().getLog(FloodfillMonitorJob.class);
+        _lastChanged = 0;
+    }
+
+    public String getName() { return "Monitor the floodfill pool"; }
+    public void runJob() {
+        boolean ff = shouldBeFloodfill();
+        _facade.setFloodfillEnabled(ff);
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Should we be floodfill? " + ff);
+        requeue((REQUEUE_DELAY / 2) + getContext().random().nextInt(REQUEUE_DELAY));
+    }
+
+    private boolean shouldBeFloodfill() {
+        // Only if not shutting down...
+        if (getContext().getProperty(Router.PROP_SHUTDOWN_IN_PROGRESS) != null)
+            return false;
+
+        String enabled = getContext().getProperty(PROP_FLOODFILL_PARTICIPANT, "auto");
+        if ("true".equals(enabled))
+            return true;
+        if ("false".equals(enabled))
+            return false;
+
+        // auto from here down
+
+        // Only if up a while...
+        if (getContext().router().getUptime() < MIN_UPTIME)
+            return false;
+
+        // Only if class O...
+        if (getContext().router().getRouterInfo().getCapabilities().indexOf("O") < 0)
+            return false;
+
+        // This list may include ourselves...
+        List floodfillPeers = _facade.getFloodfillPeers();
+        long now = getContext().clock().now();
+        // We know none at all! Must be our turn...
+        if (floodfillPeers == null || floodfillPeers.size() <= 0) {
+            _lastChanged = now;
+            return true;
+        }
+
+        // Only change status every so often
+        boolean wasFF = _facade.floodfillEnabled();
+        if (_lastChanged + MIN_CHANGE_DELAY > now)
+            return wasFF;
+
+        // This is similar to the qualification we do in FloodOnlySearchJob.runJob().
+        // Count the "good" ff peers.
+        //
+        // Who's not good?
+        // the unheard-from, unprofiled, failing, unreachable and shitlisted ones.
+        // We should hear from floodfills pretty frequently so set a 60m time limit.
+        // If unprofiled we haven't talked to them in a long time.
+        // We aren't contacting the peer directly, so shitlist doesn't strictly matter,
+        // but it's a bad sign, and we often shitlist a peer before we fail it...
+        //
+        // Future: use Integration calculation
+        //
+        int ffcount = floodfillPeers.size();
+        int failcount = 0;
+        long before = now - 60*60*1000;
+        for (int i = 0; i < floodfillPeers.size(); i++) {
+            Hash peer = (Hash)floodfillPeers.get(i);
+            if (peer.equals(getContext().routerHash()))
+                continue;
+            PeerProfile profile = getContext().profileOrganizer().getProfile(peer);
+            if (profile == null || profile.getLastHeardFrom() < before ||
+                profile.getIsFailing() || getContext().shitlist().isShitlisted(peer) ||
+                getContext().commSystem().wasUnreachable(peer))
+                failcount++;
+        }
+
+        int good = ffcount - failcount;
+        boolean happy = getContext().router().getRouterInfo().getCapabilities().indexOf("R") >= 0;
+        // Use the same job lag test as in RouterThrottleImpl
+        happy = happy && getContext().jobQueue().getMaxLag() < 2*1000;
+        // Only if we're pretty well integrated...
+        happy = happy && _facade.getKnownRouters() >= 200;
+        happy = happy && getContext().commSystem().countActivePeers() >= 50;
+        happy = happy && getContext().tunnelManager().getParticipatingCount() >= 100;
+        // We need an address and no introducers
+        if (happy) {
+            RouterAddress ra = getContext().router().getRouterInfo().getTargetAddress("SSU");
+            if (ra == null)
+                happy = false;
+            else {
+                Properties props = ra.getOptions();
+                if (props == null || props.getProperty("ihost0") != null)
+                    happy = false;
+            }
+        }
+
+
+        // Too few, and we're reachable, let's volunteer
+        if (good < MIN_FF && happy) {
+            if (!wasFF) {
+                _lastChanged = now;
+                if (_log.shouldLog(Log.ERROR))
+                    _log.error("Only " + good + " ff peers and we want " + MIN_FF + " so we are becoming floodfill");
+            }
+            return true;
+        }
+
+        // Too many, or we aren't reachable, let's stop
+        if (good > MAX_FF || (good > MIN_FF && !happy)) {
+            if (wasFF) {
+                _lastChanged = now;
+                if (_log.shouldLog(Log.ERROR))
+                    _log.error("Have " + good + " ff peers and we need only " + MIN_FF + " to " + MAX_FF +
+                               " so we are disabling floodfill; reachable? " + happy);
+            }
+            return false;
+        }
+
+        if (_log.shouldLog(Log.INFO))
+            _log.info("Have " + good + " ff peers, not changing, enabled? " + wasFF + "; reachable? " + happy);
+        return wasFF;
+    }
+
+}
FloodfillNetworkDatabaseFacade.java
@@ -12,13 +12,13 @@ import net.i2p.util.Log;
  */
 public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
     public static final char CAPACITY_FLOODFILL = 'f';
-    private static final String PROP_FLOODFILL_PARTICIPANT = "router.floodfillParticipant";
-    private static final String DEFAULT_FLOODFILL_PARTICIPANT = "false";
     private Map _activeFloodQueries;
+    private boolean _floodfillEnabled;
 
     public FloodfillNetworkDatabaseFacade(RouterContext context) {
         super(context);
         _activeFloodQueries = new HashMap();
+        _floodfillEnabled = false;
 
         _context.statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
         _context.statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
@@ -33,6 +33,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
         _context.statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
     }
 
+    public void startup() {
+        super.startup();
+        _context.jobQueue().addJob(new FloodfillMonitorJob(_context, this));
+    }
+
     protected void createHandlers() {
         _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context));
         _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseStoreMessage.MESSAGE_TYPE, new FloodfillDatabaseStoreMessageHandler(_context, this));
@@ -106,10 +111,10 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
 
     protected PeerSelector createPeerSelector() { return new FloodfillPeerSelector(_context); }
 
-    public boolean floodfillEnabled() { return floodfillEnabled(_context); }
+    public void setFloodfillEnabled(boolean yes) { _floodfillEnabled = yes; }
+    public boolean floodfillEnabled() { return _floodfillEnabled; }
     public static boolean floodfillEnabled(RouterContext ctx) {
-        String enabled = ctx.getProperty(PROP_FLOODFILL_PARTICIPANT, DEFAULT_FLOODFILL_PARTICIPANT);
-        return "true".equals(enabled);
+        return ((FloodfillNetworkDatabaseFacade)ctx.netDb()).floodfillEnabled();
     }
 
     public static boolean isFloodfill(RouterInfo peer) {