propagate from branch 'i2p.i2p.zzz.naming' (head 9626e0df682c8d5f706d2c814158ba451f3ebeb5)

to branch 'i2p.i2p' (head a3969e6c9c4fd5bfd69cd716ce0df191ad2af634)
zzz
2011-05-20 14:12:07 +00:00
154 changed files with 11593 additions and 7472 deletions

View File

@ -22,7 +22,7 @@ class JobQueueRunner implements Runnable {
_log = _context.logManager().getLog(JobQueueRunner.class);
_context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunSlow", "How long jobs that take over a second take", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRequiredRateStat("jobQueue.jobLag", "Job run delay (ms)", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sit on the job queue?", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
//_context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
//_state = 1;

View File

@ -539,16 +539,14 @@ public class Router {
return true;
return Boolean.valueOf(_context.getProperty(PROP_HIDDEN_HIDDEN)).booleanValue();
}
/**
* @return the certificate for a new RouterInfo - probably a null cert.
*/
public Certificate createCertificate() {
Certificate cert = new Certificate();
if (isHidden()) {
cert.setCertificateType(Certificate.CERTIFICATE_TYPE_HIDDEN);
cert.setPayload(null);
} else {
cert.setCertificateType(Certificate.CERTIFICATE_TYPE_NULL);
cert.setPayload(null);
}
return cert;
if (isHidden())
return new Certificate(Certificate.CERTIFICATE_TYPE_HIDDEN, null);
return Certificate.NULL_CERT;
}
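The simplified method above relies on a two-argument Certificate constructor and a shared NULL_CERT constant; those additions live in the core data classes and are not part of this section. A minimal sketch of what the call sites assume (type codes are placeholders, not the real values):

// Sketch only, to make the call sites above readable; not the actual core class.
public class Certificate {
    public static final int CERTIFICATE_TYPE_NULL = 0;     // placeholder value
    public static final int CERTIFICATE_TYPE_HIDDEN = 2;   // placeholder value

    // Shared instance so callers no longer build a fresh null cert each time.
    public static final Certificate NULL_CERT = new Certificate(CERTIFICATE_TYPE_NULL, null);

    private final int _type;
    private final byte[] _payload;

    // Convenience constructor assumed by createCertificate() above.
    public Certificate(int type, byte[] payload) {
        _type = type;
        _payload = payload;
    }
}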
/**
@ -1424,19 +1422,20 @@ private static class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
public CoalesceStatsEvent(RouterContext ctx) {
_ctx = ctx;
ctx.statManager().createRateStat("bw.receiveBps", "How fast we receive data (in KBps)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("bw.sendBps", "How fast we send data (in KBps)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("bw.sendRate", "Low level bandwidth send rate", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
ctx.statManager().createRateStat("bw.recvRate", "Low level bandwidth receive rate", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
ctx.statManager().createRateStat("router.activePeers", "How many peers we are actively talking with", "Throttle", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRequiredRateStat("bw.receiveBps", "Message receive rate (Bytes/sec)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
// used in the router watchdog
ctx.statManager().createRequiredRateStat("bw.sendBps", "Message send rate (Bytes/sec)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRequiredRateStat("bw.sendRate", "Low-level send rate (Bytes/sec)", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
ctx.statManager().createRequiredRateStat("bw.recvRate", "Low-level receive rate (Bytes/sec)", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
ctx.statManager().createRequiredRateStat("router.activePeers", "How many peers we are actively talking with", "Throttle", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.activeSendPeers", "How many peers we've sent to this minute", "Throttle", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.highCapacityPeers", "How many high capacity peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
ctx.statManager().createRateStat("router.fastPeers", "How many fast peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
ctx.statManager().createRequiredRateStat("router.fastPeers", "Known fast peers", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
_maxMemory = Runtime.getRuntime().maxMemory();
String legend = "(Bytes)";
if (_maxMemory < Long.MAX_VALUE)
legend += " Max is " + DataHelper.formatSize(_maxMemory) + 'B';
ctx.statManager().createRateStat("router.memoryUsed", legend, "Router", new long[] { 60*1000 });
ctx.statManager().createRequiredRateStat("router.memoryUsed", legend, "Router", new long[] { 60*1000 });
}
private RouterContext getContext() { return _ctx; }
public void timeReached() {
@ -1469,8 +1468,8 @@ private static class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
Rate rate = receiveRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double KBps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
getContext().statManager().addRateData("bw.receiveBps", (long)KBps, 60*1000);
double bps = (bytes*1000.0d)/rate.getPeriod();
getContext().statManager().addRateData("bw.receiveBps", (long)bps, 60*1000);
}
}
@ -1479,8 +1478,8 @@ private static class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
Rate rate = sendRate.getRate(60*1000);
if (rate != null) {
double bytes = rate.getLastTotalValue();
double KBps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
getContext().statManager().addRateData("bw.sendBps", (long)KBps, 60*1000);
double bps = (bytes*1000.0d)/rate.getPeriod();
getContext().statManager().addRateData("bw.sendBps", (long)bps, 60*1000);
}
}
}
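Both hunks above switch the reported units from KBytes/sec to bytes/sec and rename the stat descriptions to match. A small worked example of the arithmetic, with illustrative numbers:

// Illustrative numbers only.
long bytes = 150000;           // bytes counted in the last rate period
long periodMs = 60 * 1000;     // 60-second period

double bps  = (bytes * 1000.0d) / periodMs;              // new: 2500.0 bytes/sec
double kbps = (bytes * 1000.0d) / (periodMs * 1024.0d);  // old: ~2.44 KBytes/sec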

View File

@ -131,7 +131,7 @@ public class RouterClock extends Clock {
getLog().info("Updating target clock offset to " + offsetMs + "ms from " + _offset + "ms, Stratum " + stratum);
if (!_statCreated) {
_contextRC.statManager().createRateStat("clock.skew", "How far is the already adjusted clock being skewed?", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
_contextRC.statManager().createRequiredRateStat("clock.skew", "Clock step adjustment (ms)", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
_statCreated = true;
}
_contextRC.statManager().addRateData("clock.skew", delta, 0);

View File

@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 8;
public final static long BUILD = 0;
/** for example "-test" */
public final static String EXTRA = "";

View File

@ -88,8 +88,8 @@ class RouterWatchdog implements Runnable {
r = null;
if (rs != null)
r = rs.getRate(60*1000);
double kbps = (r != null ? r.getAverageValue() : 0);
_log.error("Outbound send rate: " + kbps + "KBps");
double bps = (r != null ? r.getAverageValue() : 0);
_log.error("Outbound send rate: " + bps + " Bps");
long max = Runtime.getRuntime().maxMemory();
long used = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
_log.error("Memory: " + DataHelper.formatSize(used) + '/' + DataHelper.formatSize(max));

View File

@ -16,7 +16,7 @@ public class TunnelPoolSettings {
private int _quantity;
private int _backupQuantity;
// private int _rebuildPeriod;
private int _duration;
//private int _duration;
private int _length;
private int _lengthVariance;
private int _lengthOverride;
@ -24,8 +24,8 @@ public class TunnelPoolSettings {
private boolean _isExploratory;
private boolean _allowZeroHop;
private int _IPRestriction;
private Properties _unknownOptions;
private Hash _randomKey;
private final Properties _unknownOptions;
private final Hash _randomKey;
/** prefix used to override the router's defaults for clients */
public static final String PREFIX_DEFAULT = "router.defaultPool.";
@ -57,7 +57,7 @@ public class TunnelPoolSettings {
_quantity = DEFAULT_QUANTITY;
_backupQuantity = DEFAULT_BACKUP_QUANTITY;
// _rebuildPeriod = DEFAULT_REBUILD_PERIOD;
_duration = DEFAULT_DURATION;
//_duration = DEFAULT_DURATION;
_length = DEFAULT_LENGTH;
_lengthVariance = DEFAULT_LENGTH_VARIANCE;
_allowZeroHop = DEFAULT_ALLOW_ZERO_HOP;
@ -107,8 +107,9 @@ public class TunnelPoolSettings {
public boolean isExploratory() { return _isExploratory; }
public void setIsExploratory(boolean isExploratory) { _isExploratory = isExploratory; }
public int getDuration() { return _duration; }
public void setDuration(int ms) { _duration = ms; }
// Duration is hardcoded
//public int getDuration() { return _duration; }
//public void setDuration(int ms) { _duration = ms; }
/** what destination is this a tunnel for (or null if none) */
public Hash getDestination() { return _destination; }
@ -141,8 +142,8 @@ public class TunnelPoolSettings {
_allowZeroHop = getBoolean(value, DEFAULT_ALLOW_ZERO_HOP);
else if (name.equalsIgnoreCase(prefix + PROP_BACKUP_QUANTITY))
_backupQuantity = getInt(value, DEFAULT_BACKUP_QUANTITY);
else if (name.equalsIgnoreCase(prefix + PROP_DURATION))
_duration = getInt(value, DEFAULT_DURATION);
//else if (name.equalsIgnoreCase(prefix + PROP_DURATION))
// _duration = getInt(value, DEFAULT_DURATION);
else if (name.equalsIgnoreCase(prefix + PROP_LENGTH))
_length = getInt(value, DEFAULT_LENGTH);
else if (name.equalsIgnoreCase(prefix + PROP_LENGTH_VARIANCE))
@ -165,7 +166,7 @@ public class TunnelPoolSettings {
if (props == null) return;
props.setProperty(prefix + PROP_ALLOW_ZERO_HOP, ""+_allowZeroHop);
props.setProperty(prefix + PROP_BACKUP_QUANTITY, ""+_backupQuantity);
props.setProperty(prefix + PROP_DURATION, ""+_duration);
//props.setProperty(prefix + PROP_DURATION, ""+_duration);
props.setProperty(prefix + PROP_LENGTH, ""+_length);
props.setProperty(prefix + PROP_LENGTH_VARIANCE, ""+_lengthVariance);
if (_destinationNickname != null)

View File

@ -51,11 +51,11 @@ import net.i2p.util.SimpleTimer;
* @author jrandom
*/
class ClientConnectionRunner {
private Log _log;
private final Log _log;
protected final RouterContext _context;
private ClientManager _manager;
private final ClientManager _manager;
/** socket for this particular peer connection */
private Socket _socket;
private final Socket _socket;
/** output stream of the socket that I2CP messages bound to the client should be written to */
private OutputStream _out;
/** session ID of the current client */
@ -63,13 +63,13 @@ class ClientConnectionRunner {
/** user's config */
private SessionConfig _config;
/** static mapping of MessageId to Payload, storing messages for retrieval */
private Map<MessageId, Payload> _messages;
private final Map<MessageId, Payload> _messages;
/** lease set request state, or null if there is no request pending at the moment */
private LeaseRequestState _leaseRequest;
/** currently allocated leaseSet, or null if none is allocated */
private LeaseSet _currentLeaseSet;
/** set of messageIds created but not yet ACCEPTED */
private Set<MessageId> _acceptedPending;
private final Set<MessageId> _acceptedPending;
/** thingy that does stuff */
protected I2CPMessageReader _reader;
/** just for this destination */
@ -137,15 +137,14 @@ class ClientConnectionRunner {
_messages.clear();
if (_sessionKeyManager != null)
_sessionKeyManager.shutdown();
if (_manager != null)
_manager.unregisterConnection(this);
_manager.unregisterConnection(this);
if (_currentLeaseSet != null)
_context.netDb().unpublish(_currentLeaseSet);
_leaseRequest = null;
synchronized (_alreadyProcessed) {
_alreadyProcessed.clear();
}
_config = null;
//_config = null;
//_manager = null;
}

View File

@ -163,7 +163,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
SimpleScheduler.getInstance().addPeriodicEvent(new OCMOSJCacheCleaner(ctx), CLEAN_INTERVAL, CLEAN_INTERVAL);
ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAckTime", "Message round trip time", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRequiredRateStat("client.sendAckTime", "Message round trip time (ms)", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });

View File

@ -26,8 +26,8 @@ class FloodfillMonitorJob extends JobImpl {
private static final int REQUEUE_DELAY = 60*60*1000;
private static final long MIN_UPTIME = 2*60*60*1000;
private static final long MIN_CHANGE_DELAY = 6*60*60*1000;
private static final int MIN_FF = 60;
private static final int MAX_FF = 100;
private static final int MIN_FF = 75;
private static final int MAX_FF = 150;
private static final String PROP_FLOODFILL_PARTICIPANT = "router.floodfillParticipant";
public FloodfillMonitorJob(RouterContext context, FloodfillNetworkDatabaseFacade facade) {

View File

@ -48,7 +48,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
_verifiesInProgress = new ConcurrentHashSet(8);
_alwaysQuery = _context.getProperty("netDb.alwaysQuery");
_context.statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRequiredRateStat("netDb.successTime", "Time for successful lookup (ms)", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedAttemptedPeers", "How many peers we sent a search to when the search fails", "NetworkDatabase", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });

View File

@ -120,7 +120,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* know anyone or just started up) -- see validate() below
*/
private final static long ROUTER_INFO_EXPIRATION = 3*24*60*60*1000l;
private final static long ROUTER_INFO_EXPIRATION_MIN = 150*60*1000l;
private final static long ROUTER_INFO_EXPIRATION_MIN = 120*60*1000l;
private final static long ROUTER_INFO_EXPIRATION_SHORT = 90*60*1000l;
private final static long ROUTER_INFO_EXPIRATION_FLOODFILL = 60*60*1000l;

View File

@ -35,9 +35,9 @@ public class ReseedChecker {
if (!noReseedFile.exists() && !noReseedFileAlt1.exists() && !noReseedFileAlt2.exists() && !noReseedFileAlt3.exists()) {
Log _log = context.logManager().getLog(ReseedChecker.class);
if (count <= 1)
_log.error("Downloading peer router information for a new I2P installation");
_log.logAlways(Log.INFO, "Downloading peer router information for a new I2P installation");
else
_log.error("Very few routerInfo files remaining - reseeding now");
_log.logAlways(Log.WARN, "Very few known peers remaining - reseeding now");
Reseeder reseeder = new Reseeder(context);
reseeder.requestReseed();
}

View File

@ -13,8 +13,8 @@ import net.i2p.util.Log;
*
*/
public class DBHistory {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;
private long _successfulLookups;
private long _failedLookups;
private RateStat _failedLookupRate;
@ -32,25 +32,13 @@ public class DBHistory {
private long _lastStoreFailed;
private long _unpromptedDbStoreNew;
private long _unpromptedDbStoreOld;
private String _statGroup;
private final String _statGroup;
public DBHistory(RouterContext context, String statGroup) {
_context = context;
_log = context.logManager().getLog(DBHistory.class);
_statGroup = statGroup;
_successfulLookups = 0;
_failedLookups = 0;
_failedLookupRate = null;
_invalidReplyRate = null;
_lookupReplyNew = 0;
_lookupReplyOld = 0;
_lookupReplyDuplicate = 0;
_lookupReplyInvalid = 0;
_lookupsReceived = 0;
_avgDelayBetweenLookupsReceived = 0;
_lastLookupReceived = -1;
_unpromptedDbStoreNew = 0;
_unpromptedDbStoreOld = 0;
createRates(statGroup);
}

View File

@ -23,10 +23,9 @@ import net.i2p.data.RouterInfo;
import net.i2p.router.PeerSelectionCriteria;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.util.Log;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer2;
/**
* Manage the current state of the statistics
@ -43,12 +42,15 @@ import net.i2p.util.ConcurrentHashSet;
*
*/
class PeerManager {
private Log _log;
private RouterContext _context;
private ProfileOrganizer _organizer;
private ProfilePersistenceHelper _persistenceHelper;
private Set<Hash> _peersByCapability[];
private final Log _log;
private final RouterContext _context;
private final ProfileOrganizer _organizer;
private final ProfilePersistenceHelper _persistenceHelper;
private final Set<Hash> _peersByCapability[];
private final Map<Hash, String> _capabilitiesByPeer;
private static final long REORGANIZE_TIME = 45*1000;
private static final long REORGANIZE_TIME_MEDIUM = 123*1000;
private static final long REORGANIZE_TIME_LONG = 551*1000;
public PeerManager(RouterContext context) {
_context = context;
@ -62,17 +64,30 @@ class PeerManager {
_peersByCapability[i] = new ConcurrentHashSet();
loadProfiles();
////_context.jobQueue().addJob(new EvaluateProfilesJob(_context));
SimpleScheduler.getInstance().addPeriodicEvent(new Reorg(), 0, 45*1000);
//SimpleScheduler.getInstance().addPeriodicEvent(new Reorg(), 0, REORGANIZE_TIME);
new Reorg();
//_context.jobQueue().addJob(new PersistProfilesJob(_context, this));
}
private class Reorg implements SimpleTimer.TimedEvent {
private class Reorg extends SimpleTimer2.TimedEvent {
public Reorg() {
super(SimpleTimer2.getInstance(), REORGANIZE_TIME);
}
public void timeReached() {
try {
_organizer.reorganize(true);
} catch (Throwable t) {
_log.log(Log.CRIT, "Error evaluating profiles", t);
}
long uptime = _context.router().getUptime();
long delay;
if (uptime > 2*60*60*1000)
delay = REORGANIZE_TIME_LONG;
else if (uptime > 10*60*1000)
delay = REORGANIZE_TIME_MEDIUM;
else
delay = REORGANIZE_TIME;
schedule(delay);
}
}
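The Reorg event now reschedules itself with a delay that grows with uptime: 45 seconds at startup, about two minutes after 10 minutes of uptime, and about nine minutes after two hours. The same logic, restated as a standalone helper for clarity using the constants defined above:

// Same thresholds as timeReached() above, restated as a pure function.
static long nextReorgDelay(long uptimeMs) {
    if (uptimeMs > 2*60*60*1000L)
        return REORGANIZE_TIME_LONG;    // 551 seconds
    if (uptimeMs > 10*60*1000L)
        return REORGANIZE_TIME_MEDIUM;  // 123 seconds
    return REORGANIZE_TIME;             // 45 seconds
}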

View File

@ -24,11 +24,11 @@ import net.i2p.util.Log;
*
*/
public class PeerManagerFacadeImpl implements PeerManagerFacade {
private Log _log;
private final Log _log;
private PeerManager _manager;
private RouterContext _context;
private ProfilePersistenceHelper _persistenceHelper;
private PeerTestJob _testJob;
private final RouterContext _context;
private final ProfilePersistenceHelper _persistenceHelper;
private final PeerTestJob _testJob;
public PeerManagerFacadeImpl(RouterContext ctx) {
_context = ctx;

View File

@ -27,7 +27,7 @@ import net.i2p.util.Log;
*
*/
public class PeerTestJob extends JobImpl {
private Log _log;
private final Log _log;
private PeerManager _manager;
private boolean _keepTesting;
private static final long DEFAULT_PEER_TEST_DELAY = 5*60*1000;

View File

@ -22,8 +22,8 @@ import net.i2p.router.RouterContext;
import net.i2p.util.Log;
public class ProfileManagerImpl implements ProfileManager {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;
public ProfileManagerImpl(RouterContext context) {
_context = context;

View File

@ -2,7 +2,6 @@ package net.i2p.router.peermanager;
import java.io.IOException;
import java.io.OutputStream;
import java.io.Writer;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.text.DecimalFormat;
@ -22,6 +21,7 @@ import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import net.i2p.crypto.SHA256Generator;
import net.i2p.data.Hash;
import net.i2p.data.RouterAddress;
import net.i2p.data.RouterInfo;
@ -40,23 +40,23 @@ import net.i2p.util.Log;
* should be used to add new profiles (placing them into the appropriate groupings).
*/
public class ProfileOrganizer {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;
/** H(routerIdentity) to PeerProfile for all peers that are fast and high capacity*/
private Map<Hash, PeerProfile> _fastPeers;
private final Map<Hash, PeerProfile> _fastPeers;
/** H(routerIdentity) to PeerProfile for all peers that have high capacities */
private Map<Hash, PeerProfile> _highCapacityPeers;
private final Map<Hash, PeerProfile> _highCapacityPeers;
/** H(routerIdentity) to PeerProfile for all peers that are well integrated into the network and not failing horribly */
private Map<Hash, PeerProfile> _wellIntegratedPeers;
private final Map<Hash, PeerProfile> _wellIntegratedPeers;
/** H(routerIdentity) to PeerProfile for all peers that are not failing horribly */
private Map<Hash, PeerProfile> _notFailingPeers;
private final Map<Hash, PeerProfile> _notFailingPeers;
/** H(routerIdentity), containing elements in _notFailingPeers */
private List<Hash> _notFailingPeersList;
private final List<Hash> _notFailingPeersList;
/** H(routerIdentity) to PeerProfile for all peers that ARE failing horribly (but that we haven't dropped reference to yet) */
private Map<Hash, PeerProfile> _failingPeers;
private final Map<Hash, PeerProfile> _failingPeers;
/** who are we? */
private Hash _us;
private ProfilePersistenceHelper _persistenceHelper;
private final ProfilePersistenceHelper _persistenceHelper;
/** PeerProfile objects for all peers profiled, ordered by the ones with the highest capacity first */
private Set<PeerProfile> _strictCapacityOrder;
@ -68,7 +68,7 @@ public class ProfileOrganizer {
/** integration value, separating well integrated from not well integrated */
private double _thresholdIntegrationValue;
private InverseCapacityComparator _comp;
private final InverseCapacityComparator _comp;
/**
* Defines the minimum number of 'fast' peers that the organizer should select. See
@ -78,7 +78,9 @@ public class ProfileOrganizer {
public static final String PROP_MINIMUM_FAST_PEERS = "profileOrganizer.minFastPeers";
public static final int DEFAULT_MINIMUM_FAST_PEERS = 8;
/** this is misnamed, it is really the max minimum number. */
private static final int DEFAULT_MAXIMUM_FAST_PEERS = 16;
private static final int DEFAULT_MAXIMUM_FAST_PEERS = 18;
private static final int ABSOLUTE_MAX_FAST_PEERS = 60;
/**
* Defines the minimum number of 'high capacity' peers that the organizer should
* select when using the mean - if less than this many are available, select the
@ -87,13 +89,11 @@ public class ProfileOrganizer {
*/
public static final String PROP_MINIMUM_HIGH_CAPACITY_PEERS = "profileOrganizer.minHighCapacityPeers";
public static final int DEFAULT_MINIMUM_HIGH_CAPACITY_PEERS = 10;
private static final int ABSOLUTE_MAX_HIGHCAP_PEERS = 150;
/** synchronized against this lock when updating the tier that peers are located in (and when fetching them from a peer) */
private final ReentrantReadWriteLock _reorganizeLock = new ReentrantReadWriteLock(true);
/** incredibly weak PRNG, just used for shuffling peers. no need to waste the real PRNG on this */
private Random _random = new Random();
public ProfileOrganizer(RouterContext context) {
_context = context;
_log = context.logManager().getLog(ProfileOrganizer.class);
@ -105,9 +105,6 @@ public class ProfileOrganizer {
_notFailingPeersList = new ArrayList(256);
_failingPeers = new HashMap(16);
_strictCapacityOrder = new TreeSet(_comp);
_thresholdSpeedValue = 0.0d;
_thresholdCapacityValue = 0.0d;
_thresholdIntegrationValue = 0.0d;
_persistenceHelper = new ProfilePersistenceHelper(_context);
_context.statManager().createRateStat("peer.profileSortTime", "How long the reorg takes sorting peers", "Peers", new long[] { 10*60*1000 });
@ -116,7 +113,7 @@ public class ProfileOrganizer {
_context.statManager().createRateStat("peer.profilePlaceTime", "How long the reorg takes placing peers in the tiers", "Peers", new long[] { 10*60*1000 });
_context.statManager().createRateStat("peer.profileReorgTime", "How long the reorg takes overall", "Peers", new long[] { 10*60*1000 });
// used in DBHistory
_context.statManager().createRateStat("peer.failedLookupRate", "DB Lookup fail rate", "Peers", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRequiredRateStat("peer.failedLookupRate", "Net DB Lookup fail rate", "Peers", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
}
private void getReadLock() {
@ -281,6 +278,20 @@ public class ProfileOrganizer {
public void selectFastPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectFastPeers(howMany, exclude, matches, 0);
}
/**
* Return a set of Hashes for peers that are both fast and reliable. If an insufficient
* number of peers are both fast and reliable, fall back onto high capacity peers, and if that
* doesn't contain sufficient peers, fall back onto not failing peers, and if even THAT doesn't
* have sufficient peers, fall back onto failing peers.
*
* @param howMany how many peers are desired
* @param exclude set of Hashes for routers that we don't want selected
* @param matches set to store the return value in
* @param mask 0-4 Number of bytes to match to determine if peers in the same IP range should
* not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
*
*/
public void selectFastPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
getReadLock();
try {
@ -297,6 +308,51 @@ public class ProfileOrganizer {
return;
}
/**
* Return a set of Hashes for peers that are both fast and reliable. If an insufficient
* number of peers are both fast and reliable, fall back onto high capacity peers, and if that
* doesn't contain sufficient peers, fall back onto not failing peers, and if even THAT doesn't
* have sufficient peers, fall back onto failing peers.
*
* @param howMany how many peers are desired
* @param exclude set of Hashes for routers that we don't want selected
* @param matches set to store the return value in
* @param randomKey used for deterministic random partitioning into subtiers
* @param subTierMode 0 or 2-7:
*<pre>
* 0: no partitioning, use entire tier
* 2: return only from group 0 or 1
* 3: return only from group 2 or 3
* 4: return only from group 0
* 5: return only from group 1
* 6: return only from group 2
* 7: return only from group 3
*</pre>
*/
public void selectFastPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, Hash randomKey, int subTierMode) {
getReadLock();
try {
if (subTierMode > 0) {
int sz = _fastPeers.size();
if (sz < 6 || (subTierMode >= 4 && sz < 12))
subTierMode = 0;
}
if (subTierMode > 0)
locked_selectPeers(_fastPeers, howMany, exclude, matches, randomKey, subTierMode);
else
locked_selectPeers(_fastPeers, howMany, exclude, matches, 2);
} finally { releaseReadLock(); }
if (matches.size() < howMany) {
if (_log.shouldLog(Log.INFO))
_log.info("selectFastPeers("+howMany+"), not enough fast (" + matches.size() + ") going on to highCap");
selectHighCapacityPeers(howMany, exclude, matches, 2);
} else {
if (_log.shouldLog(Log.INFO))
_log.info("selectFastPeers("+howMany+"), found enough fast (" + matches.size() + ")");
}
return;
}
/**
* Return a set of Hashes for peers that have a high capacity
*
@ -304,6 +360,11 @@ public class ProfileOrganizer {
public void selectHighCapacityPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectHighCapacityPeers(howMany, exclude, matches, 0);
}
/**
* @param mask 0-4 Number of bytes to match to determine if peers in the same IP range should
* not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
*/
public void selectHighCapacityPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
getReadLock();
try {
@ -328,6 +389,7 @@ public class ProfileOrganizer {
}
return;
}
/**
* Return a set of Hashes for peers that are well integrated into the network.
*
@ -335,6 +397,13 @@ public class ProfileOrganizer {
public void selectWellIntegratedPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectWellIntegratedPeers(howMany, exclude, matches, 0);
}
/**
* Return a set of Hashes for peers that are well integrated into the network.
*
* @param mask 0-4 Number of bytes to match to determine if peers in the same IP range should
* not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
*/
public void selectWellIntegratedPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
getReadLock();
try {
@ -351,6 +420,7 @@ public class ProfileOrganizer {
return;
}
/**
* Return a set of Hashes for peers that are not failing, preferring ones that
* we are already talking with
@ -359,12 +429,18 @@ public class ProfileOrganizer {
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectNotFailingPeers(howMany, exclude, matches, false, 0);
}
/**
* @param mask ignored, should call locked_selectPeers, to be fixed
*/
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
selectNotFailingPeers(howMany, exclude, matches, false, mask);
}
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, boolean onlyNotFailing) {
selectNotFailingPeers(howMany, exclude, matches, onlyNotFailing, 0);
}
/**
* Return a set of Hashes for peers that are not failing, preferring ones that
* we are already talking with
@ -373,6 +449,7 @@ public class ProfileOrganizer {
* @param exclude what peers to skip (may be null)
* @param matches set to store the matches in
* @param onlyNotFailing if true, don't include any high capacity peers
* @param mask ignored, should call locked_selectPeers, to be fixed
*/
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, boolean onlyNotFailing, int mask) {
if (matches.size() < howMany)
@ -417,6 +494,9 @@ public class ProfileOrganizer {
* and we're using this to try and limit connections.
*
* This DOES cascade further to non-connected peers.
*
* @param mask 0-4 Number of bytes to match to determine if peers in the same IP range should
* not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
*/
private void selectActiveNotFailingPeers2(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
if (matches.size() < howMany) {
@ -448,6 +528,7 @@ public class ProfileOrganizer {
public void selectAllNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, boolean onlyNotFailing) {
selectAllNotFailingPeers(howMany, exclude, matches, onlyNotFailing, 0);
}
/**
* @param mask ignored, should call locked_selectPeers, to be fixed
*
@ -497,6 +578,7 @@ public class ProfileOrganizer {
}
return;
}
/**
* I'm not quite sure why you'd want this... (other than for failover from the better results)
*
@ -614,6 +696,7 @@ public class ProfileOrganizer {
*
*/
public void reorganize() { reorganize(false); }
public void reorganize(boolean shouldCoalesce) {
long sortTime = 0;
int coalesceTime = 0;
@ -1034,10 +1117,10 @@ public class ProfileOrganizer {
/** called after locking the reorganizeLock */
private PeerProfile locked_getProfile(Hash peer) {
PeerProfile cur = (PeerProfile)_notFailingPeers.get(peer);
PeerProfile cur = _notFailingPeers.get(peer);
if (cur != null)
return cur;
cur = (PeerProfile)_failingPeers.get(peer);
cur = _failingPeers.get(peer);
return cur;
}
@ -1050,6 +1133,10 @@ public class ProfileOrganizer {
locked_selectPeers(peers, howMany, toExclude, matches, 0);
}
/**
* @param mask 0-4 Number of bytes to match to determine if peers in the same IP range should
* not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
*/
private void locked_selectPeers(Map<Hash, PeerProfile> peers, int howMany, Set<Hash> toExclude, Set<Hash> matches, int mask) {
List<Hash> all = new ArrayList(peers.keySet());
Set<Integer> IPSet = new HashSet(8);
@ -1125,7 +1212,7 @@ public class ProfileOrganizer {
}
/** generate an arbitrary unique value for this ip/mask (mask = 1-4) */
private Integer maskedIP(byte[] ip, int mask) {
private static Integer maskedIP(byte[] ip, int mask) {
int rv = 0;
for (int i = 0; i < mask; i++)
rv = (rv << 8) | (ip[i] & 0xff);
@ -1133,7 +1220,7 @@ public class ProfileOrganizer {
}
/** does a contain any of the elements in b? */
private boolean containsAny(Set a, Set b) {
private static boolean containsAny(Set a, Set b) {
for (Object o : b) {
if (a.contains(o))
return true;
@ -1141,6 +1228,58 @@ public class ProfileOrganizer {
return false;
}
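The mask parameter documented on the select* methods feeds maskedIP(), shown only in part above. An illustrative example of the grouping it produces (addresses are made up):

// Illustrative only. With mask = 2 (a /16 match) both addresses reduce to the
// same Integer key, so the IP-restriction check treats them as one range and
// avoids putting both peers in the same tunnel.
byte[] a = new byte[] { 10, 20, 30, 40 };
byte[] b = new byte[] { 10, 20, 99, 7 };
// maskedIP(a, 2) == (10 << 8) | 20 == 0x0A14 == maskedIP(b, 2)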
/**
* @param randomKey used for deterministic random partitioning into subtiers
* @param subTierMode 2-7:
*<pre>
* 2: return only from group 0 or 1
* 3: return only from group 2 or 3
* 4: return only from group 0
* 5: return only from group 1
* 6: return only from group 2
* 7: return only from group 3
*</pre>
*/
private void locked_selectPeers(Map<Hash, PeerProfile> peers, int howMany, Set<Hash> toExclude, Set<Hash> matches, Hash randomKey, int subTierMode) {
List<Hash> all = new ArrayList(peers.keySet());
// use RandomIterator to avoid shuffling the whole thing
for (Iterator<Hash> iter = new RandomIterator(all); (matches.size() < howMany) && iter.hasNext(); ) {
Hash peer = iter.next();
if (toExclude != null && toExclude.contains(peer))
continue;
if (matches.contains(peer))
continue;
if (_us.equals(peer))
continue;
int subTier = getSubTier(peer, randomKey);
if (subTierMode >= 4) {
if (subTier != (subTierMode & 0x03))
continue;
} else {
if ((subTier >> 1) != (subTierMode & 0x01))
continue;
}
boolean ok = isSelectable(peer);
if (ok)
matches.add(peer);
else
matches.remove(peer);
}
}
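The subTier test in the loop above encodes the grouping described in the javadoc: modes 2 and 3 each accept a pair of groups, modes 4 through 7 accept exactly one. The same bit logic restated as a small predicate (illustrative only):

// subTierMode 2 -> groups {0,1}, 3 -> {2,3}, 4 -> {0}, 5 -> {1}, 6 -> {2}, 7 -> {3}
static boolean subTierAccepts(int subTierMode, int subTier) {
    if (subTierMode >= 4)
        return subTier == (subTierMode & 0x03);
    return (subTier >> 1) == (subTierMode & 0x01);
}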
/**
* Implement a random, deterministic split into 4 groups that cannot be predicted by
* others.
* @return 0-3
*/
private static int getSubTier(Hash peer, Hash randomKey) {
byte[] data = new byte[Hash.HASH_LENGTH + 4];
System.arraycopy(peer.getData(), 0, data, 0, Hash.HASH_LENGTH);
System.arraycopy(randomKey.getData(), 0, data, Hash.HASH_LENGTH, 4);
Hash rh = SHA256Generator.getInstance().calculateHash(data);
return rh.getData()[0] & 0x03;
}
public boolean isSelectable(Hash peer) {
NetworkDatabaseFacade netDb = _context.netDb();
// the CLI shouldn't depend upon the netDb
@ -1255,18 +1394,18 @@ public class ProfileOrganizer {
*/
protected int getMinimumFastPeers() {
int def = Math.min(DEFAULT_MAXIMUM_FAST_PEERS,
(2 *_context.clientManager().listClients().size()) + DEFAULT_MINIMUM_FAST_PEERS - 2);
(6 *_context.clientManager().listClients().size()) + DEFAULT_MINIMUM_FAST_PEERS - 2);
return _context.getProperty(PROP_MINIMUM_FAST_PEERS, def);
}
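With DEFAULT_MINIMUM_FAST_PEERS = 8 and the cap raised from 16 to 18 (constants shown earlier in this file), the switch from a 2x to a 6x client multiplier raises the default fast-tier floor quickly as local destinations are added. Illustrative arithmetic:

// New default: min(18, 6*clients + 8 - 2)    Old default: min(16, 2*clients + 8 - 2)
// clients = 0 ->  6  (was  6)
// clients = 1 -> 12  (was  8)
// clients = 2 -> 18  (was 10; new value hits the cap)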
/** fixme add config @since 0.7.10 */
protected int getMaximumFastPeers() {
return 30;
return ABSOLUTE_MAX_FAST_PEERS;
}
/** fixme add config @since 0.7.11 */
protected int getMaximumHighCapPeers() {
return 75;
return ABSOLUTE_MAX_HIGHCAP_PEERS;
}
/**

View File

@ -28,9 +28,9 @@ import net.i2p.util.SecureFileOutputStream;
/**
* Write profiles to disk at shutdown,
* read at startup.
* The files are gzip compressed, however we unfortunately store them
* with a ".dat" extension instead of ".txt.gz", so it isn't apparent.
* TODO: Migrate to new extension.
* The files are gzip compressed; we previously stored them
* with a ".dat" extension instead of ".txt.gz", so this wasn't apparent.
* They are now migrated to a ".txt.gz" extension.
*/
class ProfilePersistenceHelper {
private final Log _log;

View File

@ -14,8 +14,8 @@ import net.i2p.util.Log;
*
*/
public class TunnelHistory {
private RouterContext _context;
private Log _log;
private final RouterContext _context;
private final Log _log;
private volatile long _lifetimeAgreedTo;
private volatile long _lifetimeRejected;
private volatile long _lastAgreedTo;
@ -27,7 +27,7 @@ public class TunnelHistory {
private volatile long _lastFailed;
private RateStat _rejectRate;
private RateStat _failRate;
private String _statGroup;
private final String _statGroup;
/** probabalistic tunnel rejection due to a flood of requests - essentially unused */
public static final int TUNNEL_REJECT_PROBABALISTIC_REJECT = 10;
@ -111,6 +111,7 @@ public class TunnelHistory {
_lastFailed = _context.clock().now();
}
/***** all unused
public void setLifetimeAgreedTo(long num) { _lifetimeAgreedTo = num; }
public void setLifetimeRejected(long num) { _lifetimeRejected = num; }
public void setLifetimeFailed(long num) { _lifetimeFailed = num; }
@ -120,6 +121,7 @@ public class TunnelHistory {
public void setLastRejectedTransient(long when) { _lastRejectedTransient = when; }
public void setLastRejectedProbabalistic(long when) { _lastRejectedProbabalistic = when; }
public void setLastFailed(long when) { _lastFailed = when; }
******/
public RateStat getRejectionRate() { return _rejectRate; }
public RateStat getFailedRate() { return _failRate; }

View File

@ -164,14 +164,22 @@ public class WorkingDir {
}
}
/** Returns <code>false</code> if a directory is empty, or contains nothing besides a subdirectory named plugins */
/**
* Tests if <code>dir</code> has been set up as an I2P working directory.<br/>
* Returns <code>false</code> if the directory is empty, or contains nothing besides
* subdirectories named <code>plugins</code> and/or <code>logs</code>.<br/>
* Returns <code>true</code> if the directory contains something not named
* <code>plugins</code> or <code>logs</code>.<br/>
* This allows plugins to be pre-installed before the first router start.
*/
private static boolean isSetup(File dir) {
if (dir.isDirectory()) {
String[] files = dir.list();
if (files.length == 0)
if (files == null)
return false;
if (files.length>1 || !"plugins".equals(files[0]))
return true;
for (String file: files)
if (!"plugins".equals(file) && !"logs".equals(file))
return true;
}
return false;
}
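To make the revised check concrete, a few illustrative directory listings and the value isSetup() returns for them (filenames other than plugins and logs are made up):

// Illustrative only; "router.config" stands for any other file or directory.
// {}                            -> false  (empty, treat as a fresh directory)
// {"plugins"}                   -> false  (pre-installed plugins are allowed)
// {"plugins", "logs"}           -> false
// {"plugins", "router.config"}  -> true   (already set up)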

View File

@ -85,13 +85,6 @@ public class FIFOBandwidthLimiter {
_context.statManager().createRateStat("bwLimiter.pendingInboundRequests", "How many inbound requests are ahead of the current one (ignoring ones with 0)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bwLimiter.outboundDelayedTime", "How long it takes to honor an outbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bwLimiter.inboundDelayedTime", "How long it takes to honor an inbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
if (_log.shouldLog(Log.WARN)) {
// If you want to see these you better have the logging set at startup!
_context.statManager().createRateStat("bw.sendBps1s", "How fast we are transmitting for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("bw.recvBps1s", "How fast we are receiving for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("bw.sendBps15s", "How fast we are transmitting for the 15s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("bw.recvBps15s", "How fast we are receiving for the 15s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
}
_pendingInboundRequests = new ArrayList(16);
_pendingOutboundRequests = new ArrayList(16);
_lastTotalSent = _totalAllocatedOutboundBytes.get();

View File

@ -64,11 +64,11 @@ public abstract class TransportImpl implements Transport {
_log = _context.logManager().getLog(TransportImpl.class);
_context.statManager().createRateStat("transport.sendMessageFailureLifetime", "How long the lifetime of messages that fail are?", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.sendMessageSize", "How large are the messages sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.receiveMessageSize", "How large are the messages received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRequiredRateStat("transport.sendMessageSize", "Size of sent messages (bytes)", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRequiredRateStat("transport.receiveMessageSize", "Size of received messages (bytes)", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.receiveMessageTime", "How long it takes to read a message?", "Transport", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.receiveMessageTimeSlow", "How long it takes to read a message (when it takes more than a second)?", "Transport", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.sendProcessingTime", "How long does it take from noticing that we want to send the message to having it completely sent (successfully or failed)?", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRequiredRateStat("transport.sendProcessingTime", "Time to process and send a message (ms)", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.expiredOnQueueLifetime", "How long a message that expires on our outbound queue is processed", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l } );
_sendPool = new ArrayList(16);
_unreachableEntries = new HashMap(16);

View File

@ -10,8 +10,10 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.util.Log;
import net.i2p.util.Translate;
import org.cybergarage.upnp.Action;
import org.cybergarage.upnp.ArgumentList;
@ -53,8 +55,8 @@ import org.freenetproject.ForwardPortStatus;
* TODO: Implement EventListener and react on ip-change
*/
class UPnP extends ControlPoint implements DeviceChangeListener, EventListener {
private Log _log;
private I2PAppContext _context;
private final Log _log;
private final I2PAppContext _context;
/** some schemas */
private static final String ROUTER_DEVICE = "urn:schemas-upnp-org:device:InternetGatewayDevice:1";
@ -73,7 +75,7 @@ class UPnP extends ControlPoint implements DeviceChangeListener, EventListener {
/** List of ports we want to forward */
private Set<ForwardPort> portsToForward;
/** List of ports we have actually forwarded */
private Set<ForwardPort> portsForwarded;
private final Set<ForwardPort> portsForwarded;
/** Callback to call when a forward fails or succeeds */
private ForwardPortCallback forwardCallback;
@ -397,31 +399,31 @@ class UPnP extends ControlPoint implements DeviceChangeListener, EventListener {
for(int i=0; i<sl.size(); i++) {
Service serv = sl.getService(i);
if(serv == null) continue;
sb.append("<li>Service: ");
sb.append("<li>").append(_("Service")).append(": ");
if("urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1".equals(serv.getServiceType())){
sb.append("WAN Common Interface Config<ul>");
sb.append("<li>Status: " + toString("GetCommonLinkProperties", "NewPhysicalLinkStatus", serv));
sb.append("<li>Type: " + toString("GetCommonLinkProperties", "NewWANAccessType", serv));
sb.append("<li>Upstream: " + toString("GetCommonLinkProperties", "NewLayer1UpstreamMaxBitRate", serv));
sb.append("<li>Downstream: " + toString("GetCommonLinkProperties", "NewLayer1DownstreamMaxBitRate", serv) + "<br>");
sb.append(_("WAN Common Interface Configuration"));
sb.append("<ul><li>").append(_("Status")).append(": " + toString("GetCommonLinkProperties", "NewPhysicalLinkStatus", serv));
sb.append("<li>").append(_("Type")).append(": " + toString("GetCommonLinkProperties", "NewWANAccessType", serv));
sb.append("<li>").append(_("Upstream")).append(": " + toString("GetCommonLinkProperties", "NewLayer1UpstreamMaxBitRate", serv));
sb.append("<li>").append(_("Downstream")).append(": " + toString("GetCommonLinkProperties", "NewLayer1DownstreamMaxBitRate", serv) + "<br>");
}else if("urn:schemas-upnp-org:service:WANPPPConnection:1".equals(serv.getServiceType())){
sb.append("WAN PPP Connection<ul>");
sb.append("<li>Status: " + toString("GetStatusInfo", "NewConnectionStatus", serv));
sb.append("<li>Type: " + toString("GetConnectionTypeInfo", "NewConnectionType", serv));
sb.append("<li>Upstream: " + toString("GetLinkLayerMaxBitRates", "NewUpstreamMaxBitRate", serv));
sb.append("<li>Downstream: " + toString("GetLinkLayerMaxBitRates", "NewDownstreamMaxBitRate", serv) + "<br>");
sb.append("<li>External IP: " + toString("GetExternalIPAddress", "NewExternalIPAddress", serv) + "<br>");
sb.append(_("WAN PPP Connection"));
sb.append("<ul><li>").append(_("Status")).append(": " + toString("GetStatusInfo", "NewConnectionStatus", serv));
sb.append("<li>").append(_("Type")).append(": " + toString("GetConnectionTypeInfo", "NewConnectionType", serv));
sb.append("<li>").append(_("Upstream")).append(": " + toString("GetLinkLayerMaxBitRates", "NewUpstreamMaxBitRate", serv));
sb.append("<li>").append(_("Downstream")).append(": " + toString("GetLinkLayerMaxBitRates", "NewDownstreamMaxBitRate", serv) + "<br>");
sb.append("<li>").append(_("External IP")).append(": " + toString("GetExternalIPAddress", "NewExternalIPAddress", serv) + "<br>");
}else if("urn:schemas-upnp-org:service:Layer3Forwarding:1".equals(serv.getServiceType())){
sb.append("Layer 3 Forwarding<ul>");
sb.append("<li>Default Connection Service: " + toString("GetDefaultConnectionService", "NewDefaultConnectionService", serv));
sb.append(_("Layer 3 Forwarding"));
sb.append("<ul><li>").append(_("Default Connection Service")).append(": " + toString("GetDefaultConnectionService", "NewDefaultConnectionService", serv));
}else if(WAN_IP_CONNECTION.equals(serv.getServiceType())){
sb.append("WAN IP Connection<ul>");
sb.append("<li>Status: " + toString("GetStatusInfo", "NewConnectionStatus", serv));
sb.append("<li>Type: " + toString("GetConnectionTypeInfo", "NewConnectionType", serv));
sb.append("<li>External IP: " + toString("GetExternalIPAddress", "NewExternalIPAddress", serv) + "<br>");
sb.append(_("WAN IP Connection"));
sb.append("<ul><li>").append(_("Status")).append(": " + toString("GetStatusInfo", "NewConnectionStatus", serv));
sb.append("<li>").append(_("Type")).append(": " + toString("GetConnectionTypeInfo", "NewConnectionType", serv));
sb.append("<li>").append(_("External IP")).append(": " + toString("GetExternalIPAddress", "NewExternalIPAddress", serv) + "<br>");
}else if("urn:schemas-upnp-org:service:WANEthernetLinkConfig:1".equals(serv.getServiceType())){
sb.append("WAN Ethernet Link Config<ol>");
sb.append("<li>Status: " + toString("GetEthernetLinkStatus", "NewEthernetLinkStatus", serv) + "<br>");
sb.append(_("WAN Ethernet Link Configuration"));
sb.append("<ul><li>").append(_("Status")).append(": " + toString("GetEthernetLinkStatus", "NewEthernetLinkStatus", serv) + "<br>");
}else
sb.append("~~~~~~~ "+serv.getServiceType() + "<ul>");
//listActions(serv, sb);
@ -433,10 +435,12 @@ class UPnP extends ControlPoint implements DeviceChangeListener, EventListener {
private void listSubDev(String prefix, Device dev, StringBuilder sb){
if (prefix == null)
sb.append("Device: ");
sb.append("<p>").append(_("Found Device")).append(": ");
else
sb.append("<li>Subdevice: ");
sb.append("<li>").append(_("Subdevice")).append(": ");
sb.append(dev.getFriendlyName());
if (prefix == null)
sb.append("</p>");
listSubServices(dev, sb);
DeviceList dl = dev.getDeviceList();
@ -454,38 +458,40 @@ class UPnP extends ControlPoint implements DeviceChangeListener, EventListener {
/** warning - slow */
public String renderStatusHTML() {
final StringBuilder sb = new StringBuilder();
sb.append("<h3><a name=\"upnp\"></a>UPnP Status</h3>");
sb.append("<h3><a name=\"upnp\"></a>").append(_("UPnP Status")).append("</h3>");
if(isDisabled) {
sb.append("UPnP has been disabled; Do you have more than one UPnP Internet Gateway Device on your LAN ?");
sb.append(_("UPnP has been disabled; Do you have more than one UPnP Internet Gateway Device on your LAN ?"));
return sb.toString();
} else if(!isNATPresent()) {
sb.append("UPnP has not found any UPnP-aware, compatible device on your LAN.");
sb.append(_("UPnP has not found any UPnP-aware, compatible device on your LAN."));
return sb.toString();
}
// FIXME L10n!
sb.append("<p>Found ");
listSubDev(null, _router, sb);
String addr = getNATAddress();
sb.append("<p>");
if (addr != null)
sb.append("<br>The current external IP address reported by UPnP is " + addr);
sb.append(_("The current external IP address reported by UPnP is {0}", addr));
else
sb.append("<br>The current external IP address is not available.");
sb.append(_("The current external IP address is not available."));
int downstreamMaxBitRate = getDownstreamMaxBitRate();
int upstreamMaxBitRate = getUpstreamMaxBitRate();
if(downstreamMaxBitRate > 0)
sb.append("<br>UPnP reports the max downstream bit rate is : " + downstreamMaxBitRate+ " bits/sec\n");
sb.append("<br>").append(_("UPnP reports the maximum downstream bit rate is {0}bits/sec", DataHelper.formatSize2(downstreamMaxBitRate)));
if(upstreamMaxBitRate > 0)
sb.append("<br>UPnP reports the max upstream bit rate is : " + upstreamMaxBitRate+ " bits/sec\n");
sb.append("<br>").append(_("UPnP reports the maximum upstream bit rate is {0}bits/sec", DataHelper.formatSize2(upstreamMaxBitRate)));
synchronized(lock) {
if(portsToForward != null) {
for(ForwardPort port : portsToForward) {
sb.append("<br>" + protoToString(port.protocol) + " port " + port.portNumber + " for " + port.name);
sb.append("<br>");
if(portsForwarded.contains(port))
sb.append(" has been forwarded successfully by UPnP.\n");
// {0} is TCP or UDP
// {1,number,#####} prevents 12345 from being output as 12,345 in the English locale.
// If you want the digit separator in your locale, translate as {1}.
sb.append(_("{0} port {1,number,#####} was successfully forwarded by UPnP.", protoToString(port.protocol), port.portNumber));
else
sb.append(" has not been forwarded by UPnP.\n");
sb.append(_("{0} port {1,number,#####} was not forwarded by UPnP.", protoToString(port.protocol), port.portNumber));
}
}
}
@ -711,4 +717,27 @@ class UPnP extends ControlPoint implements DeviceChangeListener, EventListener {
Thread.sleep(2000);
}
}
private static final String BUNDLE_NAME = "net.i2p.router.web.messages";
/**
* Translate
*/
private final String _(String s) {
return Translate.getString(s, _context, BUNDLE_NAME);
}
/**
* Translate
*/
private final String _(String s, Object o) {
return Translate.getString(s, o, _context, BUNDLE_NAME);
}
/**
* Translate
*/
private final String _(String s, Object o, Object o2) {
return Translate.getString(s, o, o2, _context, BUNDLE_NAME);
}
}
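The new _() helpers wrap Translate.getString() with the router console bundle, and the forwarded-port strings above use positional parameters with an explicit number format. An illustrative call (the port value is made up):

// Illustrative call; 8887 is just an example port number.
// {1,number,#####} keeps the port from being rendered as "8,887" in locales
// that use digit grouping.
String msg = _("{0} port {1,number,#####} was successfully forwarded by UPnP.",
               "TCP", Integer.valueOf(8887));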

View File

@ -11,6 +11,7 @@ import java.util.Set;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
import net.i2p.util.Translate;
import org.cybergarage.util.Debug;
import org.freenetproject.DetectedIP;
@ -25,13 +26,13 @@ import org.freenetproject.ForwardPortStatus;
* @author zzz
*/
class UPnPManager {
private Log _log;
private RouterContext _context;
private UPnP _upnp;
private UPnPCallback _upnpCallback;
private final Log _log;
private final RouterContext _context;
private final UPnP _upnp;
private final UPnPCallback _upnpCallback;
private volatile boolean _isRunning;
private InetAddress _detectedAddress;
private TransportManager _manager;
private final TransportManager _manager;
/**
* This is the TCP HTTP Event listener
* We move these so we don't conflict with other users of the same upnp library
@ -56,7 +57,6 @@ class UPnPManager {
_upnp.setHTTPPort(_context.getProperty(PROP_HTTP_PORT, DEFAULT_HTTP_PORT));
_upnp.setSSDPPort(_context.getProperty(PROP_SSDP_PORT, DEFAULT_SSDP_PORT));
_upnpCallback = new UPnPCallback();
_isRunning = false;
}
public synchronized void start() {
@ -158,7 +158,17 @@ class UPnPManager {
public String renderStatusHTML() {
if (!_isRunning)
return "<h3><a name=\"upnp\"></a>UPnP is not enabled</h3>\n";
return "<h3><a name=\"upnp\"></a>" + _("UPnP is not enabled") + "</h3>\n";
return _upnp.renderStatusHTML();
}
private static final String BUNDLE_NAME = "net.i2p.router.web.messages";
/**
* Translate
*/
private final String _(String s) {
return Translate.getString(s, _context, BUNDLE_NAME);
}
}

View File

@ -23,10 +23,11 @@ import net.i2p.util.Log;
* Currently, the comm system doesn't even inject any lag, though it could (later).
* It does honor the standard transport stats though, but not the TCP specific ones.
*
* FOR DEBUGGING AND LOCAL TESTING ONLY.
*/
public class VMCommSystem extends CommSystemFacade {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;
/**
* Mapping from Hash to VMCommSystem for all routers hooked together
*/
@ -36,15 +37,15 @@ public class VMCommSystem extends CommSystemFacade {
_context = context;
_log = context.logManager().getLog(VMCommSystem.class);
_context.statManager().createFrequencyStat("transport.sendMessageFailureFrequency", "How often do we fail to send messages?", "Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.sendMessageSize", "How large are the messages sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.receiveMessageSize", "How large are the messages received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRequiredRateStat("transport.sendMessageSize", "Size of sent messages (bytes)", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRequiredRateStat("transport.receiveMessageSize", "Size of received messages (bytes)", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.sendMessageSmall", "How many messages under 1KB are sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.receiveMessageSmall", "How many messages under 1KB are received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.sendMessageMedium", "How many messages between 1KB and 4KB are sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.receiveMessageMedium", "How many messages between 1KB and 4KB are received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.sendMessageLarge", "How many messages over 4KB are sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.receiveMessageLarge", "How many messages over 4KB are received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.sendProcessingTime", "How long does it take from noticing that we want to send the message to having it completely sent (successfully or failed)?", "Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRequiredRateStat("transport.sendProcessingTime", "Time to process and send a message (ms)", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
}
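
Most of this changeset follows one declare-then-feed pattern for stats. A minimal sketch, using a made-up stat name; the two calls (createRequiredRateStat and addRateData) and their argument order are copied from elsewhere in this diff, and the claim that a "required" stat survives stat filtering is the intent implied by the renaming, not something shown here.

// Sketch: declare the stat once at construction time, feed it per event.
// "transport.exampleTime" is a hypothetical stat name.
void initStats(RouterContext ctx) {
    // periods are in milliseconds; a required stat is assumed to be kept
    // even when optional stats are filtered out
    ctx.statManager().createRequiredRateStat("transport.exampleTime",
            "Time to do something (ms)", "Transport",
            new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
}

void recordEvent(RouterContext ctx, long elapsed) {
    // value first, then event duration -- for timings they are often the same
    ctx.statManager().addRateData("transport.exampleTime", elapsed, elapsed);
}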
/**

View File

@ -64,7 +64,7 @@ class OutboundMessageFragments {
_context.statManager().createRateStat("udp.sendSparse", "How many fragments were partially ACKed and hence not resent (time == message lifetime)", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPiggyback", "How many acks were piggybacked on a data packet (time == message lifetime)", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPiggybackPartial", "How many partial acks were piggybacked on a data packet (time == message lifetime)", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.packetsRetransmitted", "Lifetime of packets during their retransmission (period == packets transmitted, lifetime)", "udp", UDPTransport.RATES);
_context.statManager().createRequiredRateStat("udp.packetsRetransmitted", "Lifetime of packets during retransmission (ms)", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.peerPacketsRetransmitted", "How many packets have been retransmitted to the peer (lifetime) when a burst of packets are retransmitted (period == packets transmitted, lifetime)", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.blockedRetransmissions", "How packets have been transmitted to the peer when we blocked a retransmission to them?", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendCycleTime", "How long it takes to cycle through all of the active messages?", "udp", UDPTransport.RATES);

View File

@ -38,12 +38,12 @@ class UDPSender {
_context.statManager().createRateStat("udp.sendQueueSize", "How many packets are queued on the UDP sender", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendQueueFailed", "How often it was unable to add a new packet to the queue", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendQueueTrimmed", "How many packets were removed from the queue for being too old (duration == remaining)", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize", "How large packets sent are", "udp", UDPTransport.RATES);
_context.statManager().createRequiredRateStat("udp.sendPacketSize", "Size of sent packets (bytes)", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.socketSendTime", "How long the actual socket.send took", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendBWThrottleTime", "How long the send is blocked by the bandwidth throttle", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendACKTime", "How long an ACK packet is blocked for (duration == lifetime)", "udp", UDPTransport.RATES);
// used in RouterWatchdog
_context.statManager().createRateStat("udp.sendException", "How frequently we fail to send a packet (likely due to a windows exception)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("udp.sendException", "Send fails (Windows exception?)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_ACK, "ack-only packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_PUNCH, "hole punch packet size", "udp", UDPTransport.RATES);

View File

@ -114,9 +114,9 @@ public class FragmentHandler {
"Tunnels", RATES);
_context.statManager().createRateStat("tunnel.fragmentedComplete", "How many fragments were in a completely received message?",
"Tunnels", RATES);
_context.statManager().createRateStat("tunnel.fragmentedDropped", "How many fragments were in a partially received yet failed message?",
_context.statManager().createRequiredRateStat("tunnel.fragmentedDropped", "Number of dropped fragments",
"Tunnels", RATES);
_context.statManager().createRateStat("tunnel.corruptMessage", "How many corrupted messages arrived?",
_context.statManager().createRequiredRateStat("tunnel.corruptMessage", "Corrupt messages received",
"Tunnels", RATES);
}

View File

@ -18,17 +18,17 @@ import net.i2p.router.TunnelInfo;
*
*/
public class TunnelCreatorConfig implements TunnelInfo {
protected RouterContext _context;
protected final RouterContext _context;
/** only necessary for client tunnels */
private Hash _destination;
private final Hash _destination;
/** gateway first */
private HopConfig _config[];
private final HopConfig _config[];
/** gateway first */
private Hash _peers[];
private final Hash _peers[];
private long _expiration;
private List<Integer> _order;
private long _replyMessageId;
private boolean _isInbound;
private final boolean _isInbound;
private long _messagesProcessed;
private volatile long _verifiedBytesTransferred;
private boolean _failed;
@ -48,10 +48,6 @@ public class TunnelCreatorConfig implements TunnelInfo {
}
_isInbound = isInbound;
_destination = destination;
_messagesProcessed = 0;
_verifiedBytesTransferred = 0;
_failed = false;
_failures = 0;
}
/**

View File

@ -57,9 +57,9 @@ public class TunnelDispatcher implements Service {
_validator = null;
_pumper = new TunnelGatewayPumper(ctx);
_leaveJob = new LeaveTunnel(ctx);
ctx.statManager().createRateStat("tunnel.participatingTunnels",
"How many tunnels are we participating in?", "Tunnels",
new long[] { 60*1000, 10*60*1000l, 60*60*1000l });
ctx.statManager().createRequiredRateStat("tunnel.participatingTunnels",
"Tunnels routed for others", "Tunnels",
new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.dispatchOutboundPeer",
"How many messages we send out a tunnel targetting a peer?", "Tunnels",
new long[] { 10*60*1000l, 60*60*1000l });
@ -108,17 +108,17 @@ public class TunnelDispatcher implements Service {
ctx.statManager().createRateStat("tunnel.dispatchOutboundZeroHopTime",
"How long it takes to dispatch an outbound message through a zero hop tunnel", "Tunnels",
new long[] { 60*1000l, 60*60*1000l });
ctx.statManager().createRateStat("tunnel.participatingBandwidth",
"Participating traffic", "Tunnels",
ctx.statManager().createRequiredRateStat("tunnel.participatingBandwidth",
"Participating traffic received (Bytes/sec)", "Tunnels",
new long[] { 60*1000l, 60*10*1000l });
ctx.statManager().createRateStat("tunnel.participatingBandwidthOut",
"Participating traffic", "Tunnels",
ctx.statManager().createRequiredRateStat("tunnel.participatingBandwidthOut",
"Participating traffic sent (Bytes/sec)", "Tunnels",
new long[] { 60*1000l, 60*10*1000l });
ctx.statManager().createRateStat("tunnel.participatingMessageDropped",
"Dropped for exceeding share limit", "Tunnels",
new long[] { 60*1000l, 60*10*1000l });
ctx.statManager().createRateStat("tunnel.participatingMessageCount",
"How many messages are sent through a participating tunnel?", "Tunnels",
ctx.statManager().createRequiredRateStat("tunnel.participatingMessageCount",
"Number of 1KB participating messages", "Tunnels",
new long[] { 60*1000l, 60*10*1000l, 60*60*1000l });
ctx.statManager().createRateStat("tunnel.ownedMessageCount",
"How many messages are sent through a tunnel we created (period == failures)?", "Tunnels",

View File

@ -52,13 +52,13 @@ class BuildExecutor implements Runnable {
_recentlyBuildingMap = new ConcurrentHashMap(4 * MAX_CONCURRENT_BUILDS);
_context.statManager().createRateStat("tunnel.concurrentBuilds", "How many builds are going at once", "Tunnels", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.concurrentBuildsLagged", "How many builds are going at once when we reject further builds, due to job lag (period is lag)", "Tunnels", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildExploratoryExpire", "How often an exploratory tunnel times out during creation", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildClientExpire", "How often a client tunnel times out during creation", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildExploratorySuccess", "Response time for success", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildClientSuccess", "Response time for success", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildExploratoryReject", "Response time for rejection", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildClientReject", "Response time for rejection", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildRequestTime", "How long it takes to build a tunnel request", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.buildExploratoryExpire", "No response to our build request", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.buildClientExpire", "No response to our build request", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.buildExploratorySuccess", "Response time for success (ms)", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.buildClientSuccess", "Response time for success (ms)", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.buildExploratoryReject", "Response time for rejection (ms)", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.buildClientReject", "Response time for rejection (ms)", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.buildRequestTime", "Time to build a tunnel request (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.buildRequestZeroHopTime", "How long it takes to build a zero hop tunnel", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.pendingRemaining", "How many inbound requests are pending after a pass (period is how long the pass takes)?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.buildFailFirstHop", "How often we fail to build a OB tunnel because we can't contact the first hop", "Tunnels", new long[] { 60*1000, 10*60*1000 });
@ -455,6 +455,10 @@ class BuildExecutor implements Runnable {
}
}
/**
* This wakes up the executor, so call this after TunnelPool.addTunnel()
* so we don't build too many.
*/
public void buildComplete(PooledTunnelCreatorConfig cfg, TunnelPool pool) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Build complete for " + cfg);

View File

@ -69,18 +69,18 @@ class BuildHandler {
_context.statManager().createRateStat("tunnel.reject.30", "How often we reject a tunnel because of bandwidth overload", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.reject.50", "How often we reject a tunnel because of a critical issue (shutdown, etc)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.decryptRequestTime", "How long it takes to decrypt a new tunnel build request", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.rejectTimeout", "How often we reject a tunnel because we can't find the next hop", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.rejectTimeout2", "How often we fail a tunnel because we can't contact the next hop", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.decryptRequestTime", "Time to decrypt a build request (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.rejectTimeout", "Reject tunnel count (unknown next hop)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.rejectTimeout2", "Reject tunnel count (can't contact next hop)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.rejectOverloaded", "How long we had to wait before processing the request (when it was rejected)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.acceptLoad", "Delay before processing the accepted request", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.rejectOverloaded", "Delay to process rejected request (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.acceptLoad", "Delay to process accepted request (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.dropConnLimits", "Drop instead of reject due to conn limits", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.dropLoad", "How long we had to wait before finally giving up on an inbound request (period is queue count)?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.dropLoadDelay", "How long we had to wait before finally giving up on an inbound request?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.dropLoadBacklog", "How many requests were pending when they were so lagged that we had to drop a new inbound request??", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.dropLoadProactive", "What the estimated queue time was when we dropped an inbound request (period is num pending)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.dropLoadProactiveAbort", "How often we would have proactively dropped a request, but allowed it through?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.dropLoad", "Delay before dropping request (ms)?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.dropLoadDelay", "Delay before abandoning request (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.dropLoadBacklog", "Pending request count when dropped", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.dropLoadProactive", "Delay estimate when dropped (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRequiredRateStat("tunnel.dropLoadProactiveAbort", "Allowed requests during load", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.handleRemaining", "How many pending inbound requests were left on the queue after one pass?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.buildReplyTooSlow", "How often a tunnel build reply came back after we had given up waiting for it?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
@ -223,7 +223,7 @@ class BuildHandler {
_context.messageHistory().tunnelParticipantRejected(peer, "peer rejected after " + rtt + " with " + howBad + ": " + cfg.toString());
}
}
_exec.buildComplete(cfg, cfg.getTunnelPool());
if (allAgree) {
// wicked, completely built
if (cfg.isInbound())
@ -231,6 +231,8 @@ class BuildHandler {
else
_context.tunnelDispatcher().joinOutbound(cfg);
cfg.getTunnelPool().addTunnel(cfg); // self.self.self.foo!
// call buildComplete() after addTunnel() so we don't try another build.
_exec.buildComplete(cfg, cfg.getTunnelPool());
_exec.buildSuccessful(cfg);
ExpireJob expireJob = new ExpireJob(_context, cfg, cfg.getTunnelPool());
@ -242,6 +244,7 @@ class BuildHandler {
_context.statManager().addRateData("tunnel.buildClientSuccess", rtt, rtt);
} else {
// someone is no fun
_exec.buildComplete(cfg, cfg.getTunnelPool());
if (cfg.getDestination() == null)
_context.statManager().addRateData("tunnel.buildExploratoryReject", rtt, rtt);
else
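
Condensed view of the reordering above, since the success path is split across two hunks: the pool must hold the new tunnel before the executor is woken, otherwise the executor may immediately start a redundant build. Names are the ones from the hunks; joinInbound() is assumed to be the inbound counterpart of the joinOutbound() call shown.

// Success path, condensed (allAgree == true):
if (cfg.isInbound())
    _context.tunnelDispatcher().joinInbound(cfg);   // assumed counterpart of joinOutbound()
else
    _context.tunnelDispatcher().joinOutbound(cfg);
cfg.getTunnelPool().addTunnel(cfg);                 // 1) pool now counts the tunnel
_exec.buildComplete(cfg, cfg.getTunnelPool());      // 2) only then wake the executor
_exec.buildSuccessful(cfg);

// Failure path: there is no new tunnel to add, so buildComplete() is called right away.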

View File

@ -21,20 +21,57 @@ class ClientPeerSelector extends TunnelPeerSelector {
return null;
if ( (length == 0) && (settings.getLength()+settings.getLengthVariance() > 0) )
return null;
HashSet matches = new HashSet(length);
List<Hash> rv;
if (length > 0) {
if (shouldSelectExplicit(settings))
return selectExplicit(ctx, settings, length);
Set<Hash> exclude = getExclude(ctx, settings.isInbound(), settings.isExploratory());
Set<Hash> matches = new HashSet(length);
if (length == 1) {
ctx.profileOrganizer().selectFastPeers(length, exclude, matches, 0);
matches.remove(ctx.routerHash());
rv = new ArrayList(matches);
} else {
// build a tunnel using 4 subtiers.
// For a 2-hop tunnel, the first hop comes from subtiers 0-1 and the last from subtiers 2-3.
// For longer tunnels, the first hop comes from subtier 0, the middle from subtiers 2-3, and the last from subtier 1.
rv = new ArrayList(length + 1);
// OBEP or IB last hop
// group 0 or 1 if two hops, otherwise group 0
ctx.profileOrganizer().selectFastPeers(1, exclude, matches, settings.getRandomKey(), length == 2 ? 2 : 4);
matches.remove(ctx.routerHash());
exclude.addAll(matches);
rv.addAll(matches);
matches.clear();
if (length > 2) {
// middle hop(s)
// group 2 or 3
ctx.profileOrganizer().selectFastPeers(length - 2, exclude, matches, settings.getRandomKey(), 3);
matches.remove(ctx.routerHash());
if (matches.size() > 1) {
// order the middle peers for tunnels >= 4 hops
List<Hash> ordered = new ArrayList(matches);
orderPeers(ordered, settings.getRandomKey());
rv.addAll(ordered);
} else {
rv.addAll(matches);
}
exclude.addAll(matches);
matches.clear();
}
// IBGW or OB first hop
// group 2 or 3 if two hops, otherwise group 1
ctx.profileOrganizer().selectFastPeers(1, exclude, matches, settings.getRandomKey(), length == 2 ? 3 : 5);
matches.remove(ctx.routerHash());
rv.addAll(matches);
}
} else {
rv = new ArrayList(1);
}
Set exclude = getExclude(ctx, settings.isInbound(), settings.isExploratory());
ctx.profileOrganizer().selectFastPeers(length, exclude, matches, settings.getIPRestriction());
matches.remove(ctx.routerHash());
ArrayList<Hash> rv = new ArrayList(matches);
if (rv.size() > 1)
orderPeers(rv, settings.getRandomKey());
if (settings.isInbound())
rv.add(0, ctx.routerHash());
else
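
Outline of the new selection order above, gathered into one place because the hunk interleaves it with the removed single-call version. The signatures and subtier codes (2, 3, 4, 5) are copied from the added lines; the wrapper method itself is illustrative only, and the meaning of the codes is defined in ProfileOrganizer, not in this file.

// Illustrative only -- mirrors the three phases of the added code.
private List<Hash> selectClientHops(RouterContext ctx, TunnelPoolSettings settings,
                                    int length, Set<Hash> exclude) {
    List<Hash> rv = new ArrayList<Hash>(length + 1);
    Set<Hash> matches = new HashSet<Hash>();

    // Phase 1: OBEP or IB last hop (group 0 or 1 for 2-hop tunnels, otherwise group 0)
    ctx.profileOrganizer().selectFastPeers(1, exclude, matches,
            settings.getRandomKey(), length == 2 ? 2 : 4);
    matches.remove(ctx.routerHash());
    rv.addAll(matches);
    exclude.addAll(matches);
    matches.clear();

    // Phase 2: middle hop(s) from group 2 or 3, ordered deterministically for 4+ hops
    if (length > 2) {
        ctx.profileOrganizer().selectFastPeers(length - 2, exclude, matches,
                settings.getRandomKey(), 3);
        matches.remove(ctx.routerHash());
        List<Hash> ordered = new ArrayList<Hash>(matches);
        if (ordered.size() > 1)
            orderPeers(ordered, settings.getRandomKey());
        rv.addAll(ordered);
        exclude.addAll(matches);
        matches.clear();
    }

    // Phase 3: IBGW or OB first hop (group 2 or 3 for 2-hop tunnels, otherwise group 1)
    ctx.profileOrganizer().selectFastPeers(1, exclude, matches,
            settings.getRandomKey(), length == 2 ? 3 : 5);
    matches.remove(ctx.routerHash());
    rv.addAll(matches);
    return rv;  // endpoint first; the caller adds the local router hash
}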

View File

@ -26,8 +26,6 @@ class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
}
public PooledTunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound, Hash destination) {
super(ctx, length, isInbound, destination);
_pool = null;
_live = false;
}
// calls TestJob

View File

@ -34,7 +34,6 @@ class TestJob extends JobImpl {
/** base to randomize the test delay on */
private static final int TEST_DELAY = 30*1000;
private static final long[] RATES = { 60*1000, 10*60*1000l, 60*60*1000l };
public TestJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, TunnelPool pool) {
super(ctx);
@ -47,20 +46,7 @@ class TestJob extends JobImpl {
if ( (_pool == null) && (_log.shouldLog(Log.ERROR)) )
_log.error("Invalid tunnel test configuration: no pool for " + cfg, new Exception("origin"));
getTiming().setStartAfter(getDelay() + ctx.clock().now());
ctx.statManager().createRateStat("tunnel.testFailedTime", "How long did the failure take (max of 60s for full timeout)?", "Tunnels",
RATES);
ctx.statManager().createRateStat("tunnel.testExploratoryFailedTime", "How long did the failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
RATES);
ctx.statManager().createRateStat("tunnel.testFailedCompletelyTime", "How long did the complete failure take (max of 60s for full timeout)?", "Tunnels",
RATES);
ctx.statManager().createRateStat("tunnel.testExploratoryFailedCompletelyTime", "How long did the complete failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
RATES);
ctx.statManager().createRateStat("tunnel.testSuccessLength", "How long were the tunnels that passed the test?", "Tunnels",
RATES);
ctx.statManager().createRateStat("tunnel.testSuccessTime", "How long did tunnel testing take?", "Tunnels",
RATES);
ctx.statManager().createRateStat("tunnel.testAborted", "Tunnel test could not occur, since there weren't any tunnels to test with", "Tunnels",
RATES);
// stats are created in TunnelPoolManager
}
public String getName() { return "Test tunnel"; }

View File

@ -34,13 +34,16 @@ public abstract class TunnelPeerSelector {
* Which peers should go into the next tunnel for the given settings?
*
* @return ordered list of Hash objects (one per peer) specifying what order
* they should appear in a tunnel (endpoint first). This includes
* they should appear in a tunnel (ENDPOINT FIRST). This includes
* the local router in the list. If there are no tunnels or peers
* to build through, and the settings reject 0 hop tunnels, this will
* return null.
*/
public abstract List<Hash> selectPeers(RouterContext ctx, TunnelPoolSettings settings);
/**
* @return randomized number of hops 0-7, not including ourselves
*/
protected int getLength(RouterContext ctx, TunnelPoolSettings settings) {
int length = settings.getLength();
int override = settings.getLengthOverride();
@ -61,8 +64,8 @@ public abstract class TunnelPeerSelector {
}
if (length < 0)
length = 0;
if (length > 8) // as documented in tunnel.html
length = 8;
else if (length > 7) // as documented in tunnel.html
length = 7;
/*
if ( (ctx.tunnelManager().getOutboundTunnelCount() <= 0) ||
(ctx.tunnelManager().getFreeTunnelCount() <= 0) ) {

View File

@ -25,11 +25,11 @@ import net.i2p.util.Log;
* A group of tunnels for the router or a particular client, in a single direction.
*/
public class TunnelPool {
private final List _inProgress = new ArrayList();
private final List<PooledTunnelCreatorConfig> _inProgress = new ArrayList();
private final RouterContext _context;
private final Log _log;
private TunnelPoolSettings _settings;
private final ArrayList<TunnelInfo> _tunnels;
private final List<TunnelInfo> _tunnels;
private final TunnelPeerSelector _peerSelector;
private final TunnelPoolManager _manager;
private boolean _alive;
@ -80,7 +80,7 @@ public class TunnelPool {
_context.clientManager().requestLeaseSet(_settings.getDestination(), ls);
}
_context.statManager().createRateStat(_rateName,
"Tunnel Bandwidth", "Tunnels",
"Tunnel Bandwidth (Bytes/sec)", "Tunnels",
new long[] { 5*60*1000l });
}
@ -276,9 +276,12 @@ public class TunnelPool {
}
}
public void addTunnel(TunnelInfo info) {
/**
* Add to the pool.
*/
void addTunnel(TunnelInfo info) {
if (_log.shouldLog(Log.DEBUG))
_log.debug(toString() + ": Adding tunnel " + info, new Exception("Creator"));
_log.debug(toString() + ": Adding tunnel " + info /* , new Exception("Creator") */ );
LeaseSet ls = null;
synchronized (_tunnels) {
_tunnels.add(info);
@ -290,7 +293,10 @@ public class TunnelPool {
_context.clientManager().requestLeaseSet(_settings.getDestination(), ls);
}
public void removeTunnel(TunnelInfo info) {
/**
* Remove from the pool.
*/
void removeTunnel(TunnelInfo info) {
if (_log.shouldLog(Log.DEBUG))
_log.debug(toString() + ": Removing tunnel " + info);
int remaining = 0;
@ -467,8 +473,10 @@ public class TunnelPool {
}
/**
* Build a leaseSet with the required tunnels that aren't about to expire
* Build a leaseSet with the required tunnels that aren't about to expire.
* Caller must synchronize on _tunnels.
*
* @return null on failure
*/
private LeaseSet locked_buildNewLeaseSet() {
if (!_alive)
@ -599,7 +607,7 @@ public class TunnelPool {
if (rs == null) {
// Create the RateStat here rather than at the top because
// the user could change the length settings while running
_context.statManager().createRateStat(buildRateName(),
_context.statManager().createRequiredRateStat(buildRateName(),
"Tunnel Build Frequency", "Tunnels",
new long[] { TUNNEL_LIFETIME });
rs = _context.statManager().getRate(buildRateName());
@ -724,8 +732,8 @@ public class TunnelPool {
int inProgress = 0;
synchronized (_inProgress) {
inProgress = _inProgress.size();
for (int i = 0; i < _inProgress.size(); i++) {
PooledTunnelCreatorConfig cfg = (PooledTunnelCreatorConfig)_inProgress.get(i);
for (int i = 0; i < inProgress; i++) {
PooledTunnelCreatorConfig cfg = _inProgress.get(i);
if (cfg.getLength() <= 1)
fallback++;
}
@ -846,17 +854,25 @@ public class TunnelPool {
return rv;
}
/**
* @return null on failure
*/
PooledTunnelCreatorConfig configureNewTunnel() { return configureNewTunnel(false); }
/**
* @return null on failure
*/
private PooledTunnelCreatorConfig configureNewTunnel(boolean forceZeroHop) {
TunnelPoolSettings settings = getSettings();
List peers = null;
long expiration = _context.clock().now() + settings.getDuration();
// peers for new tunnel, including us, ENDPOINT FIRST
List<Hash> peers = null;
long expiration = _context.clock().now() + TunnelPoolSettings.DEFAULT_DURATION;
if (!forceZeroHop) {
peers = _peerSelector.selectPeers(_context, settings);
if ( (peers == null) || (peers.isEmpty()) ) {
// no inbound or outbound tunnels to send the request through, and
// no peers to build the tunnel with, and
// the pool is refusing 0 hop tunnels
if (peers == null) {
if (_log.shouldLog(Log.WARN))
@ -871,12 +887,13 @@ public class TunnelPool {
peers = new ArrayList(1);
peers.add(_context.routerHash());
}
PooledTunnelCreatorConfig cfg = new PooledTunnelCreatorConfig(_context, peers.size(), settings.isInbound(), settings.getDestination());
cfg.setTunnelPool(this);
// peers[] is ordered endpoint first, but cfg.getPeer() is ordered gateway first
// peers list is ordered endpoint first, but cfg.getPeer() is ordered gateway first
for (int i = 0; i < peers.size(); i++) {
int j = peers.size() - 1 - i;
cfg.setPeer(j, (Hash)peers.get(i));
cfg.setPeer(j, peers.get(i));
HopConfig hop = cfg.getConfig(j);
hop.setCreation(_context.clock().now());
hop.setExpiration(expiration);
@ -895,6 +912,9 @@ public class TunnelPool {
return cfg;
}
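
The endpoint-first to gateway-first mapping noted in the comment above, as a tiny worked example (the peer names are made up):

// selectPeers() returns [ endpoint, middle, gateway ]; cfg slots are gateway first.
List<String> peers = Arrays.asList("C-endpoint", "B-middle", "A-gateway");
String[] slots = new String[peers.size()];
for (int i = 0; i < peers.size(); i++)
    slots[peers.size() - 1 - i] = peers.get(i);   // same index math as cfg.setPeer(j, ...)
// slots == [ "A-gateway", "B-middle", "C-endpoint" ]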
/**
* Remove from the _inProgress list
*/
void buildComplete(PooledTunnelCreatorConfig cfg) {
synchronized (_inProgress) { _inProgress.remove(cfg); }
cfg.setTunnelPool(this);

View File

@ -47,6 +47,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
private TunnelPool _outboundExploratory;
private final BuildExecutor _executor;
private boolean _isShutdown;
private static final long[] RATES = { 60*1000, 10*60*1000l, 60*60*1000l };
public TunnelPoolManager(RouterContext ctx) {
_context = ctx;
@ -66,12 +67,21 @@ public class TunnelPoolManager implements TunnelManagerFacade {
execThread.setDaemon(true);
execThread.start();
ctx.statManager().createRateStat("tunnel.testSuccessTime",
"How long do successful tunnel tests take?", "Tunnels",
new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.participatingTunnels",
"How many tunnels are we participating in?", "Tunnels",
new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
// The following are for TestJob
ctx.statManager().createRequiredRateStat("tunnel.testFailedTime", "Time for tunnel test failure (ms)", "Tunnels",
RATES);
ctx.statManager().createRateStat("tunnel.testExploratoryFailedTime", "How long did the failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
RATES);
ctx.statManager().createRateStat("tunnel.testFailedCompletelyTime", "How long did the complete failure take (max of 60s for full timeout)?", "Tunnels",
RATES);
ctx.statManager().createRateStat("tunnel.testExploratoryFailedCompletelyTime", "How long did the complete failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
RATES);
ctx.statManager().createRateStat("tunnel.testSuccessLength", "How long were the tunnels that passed the test?", "Tunnels",
RATES);
ctx.statManager().createRequiredRateStat("tunnel.testSuccessTime", "Time for tunnel test success (ms)", "Tunnels",
RATES);
ctx.statManager().createRateStat("tunnel.testAborted", "Tunnel test could not occur, since there weren't any tunnels to test with", "Tunnels",
RATES);
}
/** pick an inbound tunnel not bound to a particular destination */
@ -329,7 +339,6 @@ public class TunnelPoolManager implements TunnelManagerFacade {
/** queue a recurring test job if appropriate */
void buildComplete(PooledTunnelCreatorConfig cfg) {
//buildComplete();
if (cfg.getLength() > 1 &&
(!_context.router().gracefulShutdownInProgress()) &&
!Boolean.valueOf(_context.getProperty("router.disableTunnelTesting")).booleanValue()) {
@ -357,9 +366,6 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
}
/** ?? */
void buildComplete() {}
public void startup() {
_isShutdown = false;
if (!_executor.isRunning()) {

View File

@ -17,6 +17,8 @@
package org.cybergarage.xml.parser;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import javax.xml.parsers.DocumentBuilder;
@ -102,7 +104,7 @@ public class JaxpParser extends Parser
try {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
DocumentBuilder builder = factory.newDocumentBuilder();
InputSource inSrc = new InputSource(inStream);
InputSource inSrc = new InputSource(new NullFilterInputStream(inStream));
Document doc = builder.parse(inSrc);
org.w3c.dom.Element docElem = doc.getDocumentElement();
@ -124,4 +126,27 @@ public class JaxpParser extends Parser
return root;
}
/**
* I2P -
* Filter out nulls, hopefully to avoid
* SAXParseException "Content not allowed in trailing section",
* which is apparently caused by nulls.
* Alternative is to remove all stuff between '>' and '<',
* which isn't so hard if we assume no CDATA.
*/
private static class NullFilterInputStream extends FilterInputStream {
public NullFilterInputStream(InputStream is) {
super(is);
}
@Override
public int read() throws IOException {
int rv;
while ((rv = super.read()) == 0) {
// try again
}
return rv;
}
}
}
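
A quick, self-contained check of the behavior described above, assuming the class were visible outside JaxpParser (it is a private nested class); the byte values are invented. Note the override covers single-byte read() only; FilterInputStream delegates bulk read(byte[], int, int) straight to the wrapped stream.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class NullFilterDemo {
    public static void main(String[] args) throws IOException {
        // Embedded NULs are skipped on single-byte reads.
        byte[] raw = { '<', 'a', '/', '>', 0, 0, '\n' };
        InputStream in = new NullFilterInputStream(new ByteArrayInputStream(raw));
        StringBuilder buf = new StringBuilder();
        int c;
        while ((c = in.read()) != -1)
            buf.append((char) c);
        System.out.print(buf);   // prints "<a/>" and a newline; the NULs are gone
    }
}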