* Tunnels and profiles:

      - Increase max fast and high-cap tier sizes
      - Slow profile eval cycle after sufficient uptime
      - Fix bug which started a new build after a successful build
      - Misc. cleanups
This commit is contained in:
zzz
2011-05-06 01:10:28 +00:00
parent 90642a8ab5
commit c356792d02
11 changed files with 104 additions and 57 deletions

View File

@ -1,3 +1,10 @@
2011-05-06 zzz
* Tunnels and profiles:
- Increase max fast and high-cap tier sizes
- Slow profile eval cycle after sufficient uptime
- Fix bug which started a new build after a successful build
- Misc. cleanups
2011-04-28 zzz
* Console: Try to prevent cascaded IllegalStateExceptions in .jsp code;
add logging for original error

View File

@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 1;
public final static long BUILD = 2;
/** for example "-test" */
public final static String EXTRA = "";

View File

@ -16,7 +16,7 @@ public class TunnelPoolSettings {
private int _quantity;
private int _backupQuantity;
// private int _rebuildPeriod;
private int _duration;
//private int _duration;
private int _length;
private int _lengthVariance;
private int _lengthOverride;
@ -24,8 +24,8 @@ public class TunnelPoolSettings {
private boolean _isExploratory;
private boolean _allowZeroHop;
private int _IPRestriction;
private Properties _unknownOptions;
private Hash _randomKey;
private final Properties _unknownOptions;
private final Hash _randomKey;
/** prefix used to override the router's defaults for clients */
public static final String PREFIX_DEFAULT = "router.defaultPool.";
@ -57,7 +57,7 @@ public class TunnelPoolSettings {
_quantity = DEFAULT_QUANTITY;
_backupQuantity = DEFAULT_BACKUP_QUANTITY;
// _rebuildPeriod = DEFAULT_REBUILD_PERIOD;
_duration = DEFAULT_DURATION;
//_duration = DEFAULT_DURATION;
_length = DEFAULT_LENGTH;
_lengthVariance = DEFAULT_LENGTH_VARIANCE;
_allowZeroHop = DEFAULT_ALLOW_ZERO_HOP;
@ -107,8 +107,9 @@ public class TunnelPoolSettings {
public boolean isExploratory() { return _isExploratory; }
public void setIsExploratory(boolean isExploratory) { _isExploratory = isExploratory; }
public int getDuration() { return _duration; }
public void setDuration(int ms) { _duration = ms; }
// Duration is hardcoded
//public int getDuration() { return _duration; }
//public void setDuration(int ms) { _duration = ms; }
/** what destination is this a tunnel for (or null if none) */
public Hash getDestination() { return _destination; }
@ -141,8 +142,8 @@ public class TunnelPoolSettings {
_allowZeroHop = getBoolean(value, DEFAULT_ALLOW_ZERO_HOP);
else if (name.equalsIgnoreCase(prefix + PROP_BACKUP_QUANTITY))
_backupQuantity = getInt(value, DEFAULT_BACKUP_QUANTITY);
else if (name.equalsIgnoreCase(prefix + PROP_DURATION))
_duration = getInt(value, DEFAULT_DURATION);
//else if (name.equalsIgnoreCase(prefix + PROP_DURATION))
// _duration = getInt(value, DEFAULT_DURATION);
else if (name.equalsIgnoreCase(prefix + PROP_LENGTH))
_length = getInt(value, DEFAULT_LENGTH);
else if (name.equalsIgnoreCase(prefix + PROP_LENGTH_VARIANCE))
@ -165,7 +166,7 @@ public class TunnelPoolSettings {
if (props == null) return;
props.setProperty(prefix + PROP_ALLOW_ZERO_HOP, ""+_allowZeroHop);
props.setProperty(prefix + PROP_BACKUP_QUANTITY, ""+_backupQuantity);
props.setProperty(prefix + PROP_DURATION, ""+_duration);
//props.setProperty(prefix + PROP_DURATION, ""+_duration);
props.setProperty(prefix + PROP_LENGTH, ""+_length);
props.setProperty(prefix + PROP_LENGTH_VARIANCE, ""+_lengthVariance);
if (_destinationNickname != null)

View File

@ -13,8 +13,8 @@ import net.i2p.util.Log;
*
*/
public class DBHistory {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;
private long _successfulLookups;
private long _failedLookups;
private RateStat _failedLookupRate;
@ -32,25 +32,13 @@ public class DBHistory {
private long _lastStoreFailed;
private long _unpromptedDbStoreNew;
private long _unpromptedDbStoreOld;
private String _statGroup;
private final String _statGroup;
public DBHistory(RouterContext context, String statGroup) {
_context = context;
_log = context.logManager().getLog(DBHistory.class);
_statGroup = statGroup;
_successfulLookups = 0;
_failedLookups = 0;
_failedLookupRate = null;
_invalidReplyRate = null;
_lookupReplyNew = 0;
_lookupReplyOld = 0;
_lookupReplyDuplicate = 0;
_lookupReplyInvalid = 0;
_lookupsReceived = 0;
_avgDelayBetweenLookupsReceived = 0;
_lastLookupReceived = -1;
_unpromptedDbStoreNew = 0;
_unpromptedDbStoreOld = 0;
createRates(statGroup);
}

View File

@ -23,10 +23,9 @@ import net.i2p.data.RouterInfo;
import net.i2p.router.PeerSelectionCriteria;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.util.Log;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer2;
/**
* Manage the current state of the statistics
@ -43,12 +42,15 @@ import net.i2p.util.ConcurrentHashSet;
*
*/
class PeerManager {
private Log _log;
private RouterContext _context;
private ProfileOrganizer _organizer;
private ProfilePersistenceHelper _persistenceHelper;
private Set<Hash> _peersByCapability[];
private final Log _log;
private final RouterContext _context;
private final ProfileOrganizer _organizer;
private final ProfilePersistenceHelper _persistenceHelper;
private final Set<Hash> _peersByCapability[];
private final Map<Hash, String> _capabilitiesByPeer;
private static final long REORGANIZE_TIME = 45*1000;
private static final long REORGANIZE_TIME_MEDIUM = 123*1000;
private static final long REORGANIZE_TIME_LONG = 551*1000;
public PeerManager(RouterContext context) {
_context = context;
@ -62,17 +64,30 @@ class PeerManager {
_peersByCapability[i] = new ConcurrentHashSet();
loadProfiles();
////_context.jobQueue().addJob(new EvaluateProfilesJob(_context));
SimpleScheduler.getInstance().addPeriodicEvent(new Reorg(), 0, 45*1000);
//SimpleScheduler.getInstance().addPeriodicEvent(new Reorg(), 0, REORGANIZE_TIME);
new Reorg();
//_context.jobQueue().addJob(new PersistProfilesJob(_context, this));
}
private class Reorg implements SimpleTimer.TimedEvent {
private class Reorg extends SimpleTimer2.TimedEvent {
public Reorg() {
super(SimpleTimer2.getInstance(), REORGANIZE_TIME);
}
public void timeReached() {
try {
_organizer.reorganize(true);
} catch (Throwable t) {
_log.log(Log.CRIT, "Error evaluating profiles", t);
}
long uptime = _context.router().getUptime();
long delay;
if (uptime > 2*60*60*1000)
delay = REORGANIZE_TIME_LONG;
else if (uptime > 10*60*1000)
delay = REORGANIZE_TIME_MEDIUM;
else
delay = REORGANIZE_TIME;
schedule(delay);
}
}

View File

@ -40,23 +40,23 @@ import net.i2p.util.Log;
* should be used to add new profiles (placing them into the appropriate groupings).
*/
public class ProfileOrganizer {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;
/** H(routerIdentity) to PeerProfile for all peers that are fast and high capacity*/
private Map<Hash, PeerProfile> _fastPeers;
private final Map<Hash, PeerProfile> _fastPeers;
/** H(routerIdentity) to PeerProfile for all peers that have high capacities */
private Map<Hash, PeerProfile> _highCapacityPeers;
private final Map<Hash, PeerProfile> _highCapacityPeers;
/** H(routerIdentity) to PeerProfile for all peers that well integrated into the network and not failing horribly */
private Map<Hash, PeerProfile> _wellIntegratedPeers;
private final Map<Hash, PeerProfile> _wellIntegratedPeers;
/** H(routerIdentity) to PeerProfile for all peers that are not failing horribly */
private Map<Hash, PeerProfile> _notFailingPeers;
private final Map<Hash, PeerProfile> _notFailingPeers;
/** H(routerIdnetity), containing elements in _notFailingPeers */
private List<Hash> _notFailingPeersList;
private final List<Hash> _notFailingPeersList;
/** H(routerIdentity) to PeerProfile for all peers that ARE failing horribly (but that we haven't dropped reference to yet) */
private Map<Hash, PeerProfile> _failingPeers;
private final Map<Hash, PeerProfile> _failingPeers;
/** who are we? */
private Hash _us;
private ProfilePersistenceHelper _persistenceHelper;
private final ProfilePersistenceHelper _persistenceHelper;
/** PeerProfile objects for all peers profiled, orderd by the ones with the highest capacity first */
private Set<PeerProfile> _strictCapacityOrder;
@ -68,7 +68,7 @@ public class ProfileOrganizer {
/** integration value, seperating well integrated from not well integrated */
private double _thresholdIntegrationValue;
private InverseCapacityComparator _comp;
private final InverseCapacityComparator _comp;
/**
* Defines the minimum number of 'fast' peers that the organizer should select. See
@ -79,6 +79,8 @@ public class ProfileOrganizer {
public static final int DEFAULT_MINIMUM_FAST_PEERS = 8;
/** this is misnamed, it is really the max minimum number. */
private static final int DEFAULT_MAXIMUM_FAST_PEERS = 16;
private static final int ABSOLUTE_MAX_FAST_PEERS = 60;
/**
* Defines the minimum number of 'high capacity' peers that the organizer should
* select when using the mean - if less than this many are available, select the
@ -87,6 +89,7 @@ public class ProfileOrganizer {
*/
public static final String PROP_MINIMUM_HIGH_CAPACITY_PEERS = "profileOrganizer.minHighCapacityPeers";
public static final int DEFAULT_MINIMUM_HIGH_CAPACITY_PEERS = 10;
private static final int ABSOLUTE_MAX_HIGHCAP_PEERS = 150;
/** synchronized against this lock when updating the tier that peers are located in (and when fetching them from a peer) */
private final ReentrantReadWriteLock _reorganizeLock = new ReentrantReadWriteLock(true);
@ -276,11 +279,14 @@ public class ProfileOrganizer {
* @param howMany how many peers are desired
* @param exclude set of Hashes for routers that we don't want selected
* @param matches set to store the return value in
* @param mask 0-4 Number of bytes to match to determine if peers in the same IP range should
* not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
*
*/
public void selectFastPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectFastPeers(howMany, exclude, matches, 0);
}
public void selectFastPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
getReadLock();
try {
@ -304,6 +310,11 @@ public class ProfileOrganizer {
public void selectHighCapacityPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectHighCapacityPeers(howMany, exclude, matches, 0);
}
/**
* @param mask 0-4 Number of bytes to match to determine if peers in the same IP range should
* not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
*/
public void selectHighCapacityPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
getReadLock();
try {
@ -328,13 +339,17 @@ public class ProfileOrganizer {
}
return;
}
/**
* Return a set of Hashes for peers that are well integrated into the network.
*
* @param mask 0-4 Number of bytes to match to determine if peers in the same IP range should
* not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
*/
public void selectWellIntegratedPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectWellIntegratedPeers(howMany, exclude, matches, 0);
}
public void selectWellIntegratedPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
getReadLock();
try {
@ -351,6 +366,7 @@ public class ProfileOrganizer {
return;
}
/**
* Return a set of Hashes for peers that are not failing, preferring ones that
* we are already talking with
@ -359,12 +375,18 @@ public class ProfileOrganizer {
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectNotFailingPeers(howMany, exclude, matches, false, 0);
}
/**
* @param mask ignored, should call locked_selectPeers, to be fixed
*/
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
selectNotFailingPeers(howMany, exclude, matches, false, mask);
}
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, boolean onlyNotFailing) {
selectNotFailingPeers(howMany, exclude, matches, onlyNotFailing, 0);
}
/**
* Return a set of Hashes for peers that are not failing, preferring ones that
* we are already talking with
@ -373,6 +395,7 @@ public class ProfileOrganizer {
* @param exclude what peers to skip (may be null)
* @param matches set to store the matches in
* @param onlyNotFailing if true, don't include any high capacity peers
* @param mask ignored, should call locked_selectPeers, to be fixed
*/
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, boolean onlyNotFailing, int mask) {
if (matches.size() < howMany)
@ -417,6 +440,9 @@ public class ProfileOrganizer {
* and we're using this to try and limit connections.
*
* This DOES cascade further to non-connected peers.
*
* @param mask 0-4 Number of bytes to match to determine if peers in the same IP range should
* not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
*/
private void selectActiveNotFailingPeers2(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
if (matches.size() < howMany) {
@ -448,6 +474,7 @@ public class ProfileOrganizer {
public void selectAllNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, boolean onlyNotFailing) {
selectAllNotFailingPeers(howMany, exclude, matches, onlyNotFailing, 0);
}
/**
* @param mask ignored, should call locked_selectPeers, to be fixed
*
@ -497,6 +524,7 @@ public class ProfileOrganizer {
}
return;
}
/**
* I'm not quite sure why you'd want this... (other than for failover from the better results)
*
@ -614,6 +642,7 @@ public class ProfileOrganizer {
*
*/
public void reorganize() { reorganize(false); }
public void reorganize(boolean shouldCoalesce) {
long sortTime = 0;
int coalesceTime = 0;
@ -1050,6 +1079,10 @@ public class ProfileOrganizer {
locked_selectPeers(peers, howMany, toExclude, matches, 0);
}
/**
* @param mask 0-4 Number of bytes to match to determine if peers in the same IP range should
* not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
*/
private void locked_selectPeers(Map<Hash, PeerProfile> peers, int howMany, Set<Hash> toExclude, Set<Hash> matches, int mask) {
List<Hash> all = new ArrayList(peers.keySet());
Set<Integer> IPSet = new HashSet(8);
@ -1261,12 +1294,12 @@ public class ProfileOrganizer {
/** fixme add config @since 0.7.10 */
protected int getMaximumFastPeers() {
return 30;
return ABSOLUTE_MAX_FAST_PEERS;
}
/** fixme add config @since 0.7.11 */
protected int getMaximumHighCapPeers() {
return 75;
return ABSOLUTE_MAX_HIGHCAP_PEERS;
}
/**

View File

@ -14,8 +14,8 @@ import net.i2p.util.Log;
*
*/
public class TunnelHistory {
private RouterContext _context;
private Log _log;
private final RouterContext _context;
private final Log _log;
private volatile long _lifetimeAgreedTo;
private volatile long _lifetimeRejected;
private volatile long _lastAgreedTo;
@ -27,7 +27,7 @@ public class TunnelHistory {
private volatile long _lastFailed;
private RateStat _rejectRate;
private RateStat _failRate;
private String _statGroup;
private final String _statGroup;
/** probabalistic tunnel rejection due to a flood of requests - essentially unused */
public static final int TUNNEL_REJECT_PROBABALISTIC_REJECT = 10;

View File

@ -455,6 +455,10 @@ class BuildExecutor implements Runnable {
}
}
/**
* This wakes up the executor, so call this after TunnelPool.addTunnel()
* so we don't build too many.
*/
public void buildComplete(PooledTunnelCreatorConfig cfg, TunnelPool pool) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Build complete for " + cfg);

View File

@ -223,7 +223,7 @@ class BuildHandler {
_context.messageHistory().tunnelParticipantRejected(peer, "peer rejected after " + rtt + " with " + howBad + ": " + cfg.toString());
}
}
_exec.buildComplete(cfg, cfg.getTunnelPool());
if (allAgree) {
// wikked, completely build
if (cfg.isInbound())
@ -231,6 +231,8 @@ class BuildHandler {
else
_context.tunnelDispatcher().joinOutbound(cfg);
cfg.getTunnelPool().addTunnel(cfg); // self.self.self.foo!
// call buildComplete() after addTunnel() so we don't try another build.
_exec.buildComplete(cfg, cfg.getTunnelPool());
_exec.buildSuccessful(cfg);
ExpireJob expireJob = new ExpireJob(_context, cfg, cfg.getTunnelPool());
@ -242,6 +244,7 @@ class BuildHandler {
_context.statManager().addRateData("tunnel.buildClientSuccess", rtt, rtt);
} else {
// someone is no fun
_exec.buildComplete(cfg, cfg.getTunnelPool());
if (cfg.getDestination() == null)
_context.statManager().addRateData("tunnel.buildExploratoryReject", rtt, rtt);
else

View File

@ -34,7 +34,7 @@ public abstract class TunnelPeerSelector {
* Which peers should go into the next tunnel for the given settings?
*
* @return ordered list of Hash objects (one per peer) specifying what order
* they should appear in a tunnel (endpoint first). This includes
* they should appear in a tunnel (ENDPOINT FIRST). This includes
* the local router in the list. If there are no tunnels or peers
* to build through, and the settings reject 0 hop tunnels, this will
* return null.
@ -61,7 +61,7 @@ public abstract class TunnelPeerSelector {
}
if (length < 0)
length = 0;
if (length > 8) // as documented in tunnel.html
else if (length > 8) // as documented in tunnel.html
length = 8;
/*
if ( (ctx.tunnelManager().getOutboundTunnelCount() <= 0) ||

View File

@ -329,7 +329,6 @@ public class TunnelPoolManager implements TunnelManagerFacade {
/** queue a recurring test job if appropriate */
void buildComplete(PooledTunnelCreatorConfig cfg) {
//buildComplete();
if (cfg.getLength() > 1 &&
(!_context.router().gracefulShutdownInProgress()) &&
!Boolean.valueOf(_context.getProperty("router.disableTunnelTesting")).booleanValue()) {
@ -357,9 +356,6 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
}
/** ?? */
void buildComplete() {}
public void startup() {
_isShutdown = false;
if (!_executor.isRunning()) {