propagate from branch 'i2p.i2p' (head e26fa9cbcc023c4c1d8bdc4eb8dbd4a964bb6148)
to branch 'i2p.i2p.zzz.upnp' (head b712f92f4dce03ce1f7d1b2ffc95b559b9b66140)
router/java/nbproject/project.xml (Normal file, 92 lines)
@@ -0,0 +1,92 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://www.netbeans.org/ns/project/1">
    <type>org.netbeans.modules.ant.freeform</type>
    <configuration>
        <general-data xmlns="http://www.netbeans.org/ns/freeform-project/1">
            <name>i2p_router</name>
        </general-data>
        <general-data xmlns="http://www.netbeans.org/ns/freeform-project/2">
            <!-- Do not use Project Properties customizer when editing this file manually. -->
            <name>i2p_router</name>
            <properties/>
            <folders>
                <source-folder>
                    <label>src</label>
                    <type>java</type>
                    <location>src</location>
                    <encoding>UTF-8</encoding>
                </source-folder>
                <source-folder>
                    <label>test</label>
                    <type>java</type>
                    <location>test</location>
                    <encoding>UTF-8</encoding>
                </source-folder>
                <source-folder>
                    <label>i2p_router</label>
                    <location>.</location>
                    <encoding>UTF-8</encoding>
                </source-folder>
            </folders>
            <ide-actions>
                <action name="build">
                    <target>build</target>
                </action>
                <action name="clean">
                    <target>clean</target>
                </action>
                <action name="javadoc">
                    <target>javadoc</target>
                </action>
                <action name="test">
                    <target>test</target>
                </action>
                <action name="rebuild">
                    <target>clean</target>
                    <target>build</target>
                </action>
            </ide-actions>
            <export>
                <type>folder</type>
                <location>build/obj</location>
                <build-target>build</build-target>
            </export>
            <view>
                <items>
                    <source-folder style="packages">
                        <label>src</label>
                        <location>src</location>
                    </source-folder>
                    <source-folder style="packages">
                        <label>test</label>
                        <location>test</location>
                    </source-folder>
                    <source-file>
                        <location>build.xml</location>
                    </source-file>
                </items>
                <context-menu>
                    <ide-action name="build"/>
                    <ide-action name="rebuild"/>
                    <ide-action name="clean"/>
                    <ide-action name="javadoc"/>
                    <ide-action name="test"/>
                </context-menu>
            </view>
        </general-data>
        <java-data xmlns="http://www.netbeans.org/ns/freeform-project-java/2">
            <compilation-unit>
                <package-root>src</package-root>
                <classpath mode="compile">build/obj:../../core/java/build/i2p.jar</classpath>
                <built-to>build/obj</built-to>
                <javadoc-built-to>build/javadoc</javadoc-built-to>
                <source-level>1.5</source-level>
            </compilation-unit>
            <compilation-unit>
                <package-root>test</package-root>
                <unit-tests/>
                <source-level>1.5</source-level>
            </compilation-unit>
        </java-data>
    </configuration>
</project>

@@ -75,6 +75,11 @@ public class TunnelGatewayMessage extends I2NPMessageImpl {
        }
        DataHelper.toLong(out, curIndex, 2, _msgData.length);
        curIndex += 2;
        // where is this coming from?
        if (curIndex + _msgData.length > out.length) {
            _log.log(Log.ERROR, "output buffer too small idx: " + curIndex + " len: " + _msgData.length + " outlen: " + out.length);
            throw new I2NPMessageException("Too much data to write out (id=" + _tunnelId + " data=" + _msg + ")");
        }
        System.arraycopy(_msgData, 0, out, curIndex, _msgData.length);
        curIndex += _msgData.length;
        return curIndex;

@@ -754,37 +754,36 @@ public class Blocklist {
        // We already shitlisted in shitlist(peer), that's good enough
    }

    /** write directly to the stream so we don't OOM on a huge list */
    public void renderStatusHTML(Writer out) throws IOException {
        StringBuffer buf = new StringBuffer(1024);
        buf.append("<h2>IP Blocklist</h2>");
        out.write("<h2>IP Blocklist</h2>");
        Set singles = new TreeSet();
        synchronized(_singleIPBlocklist) {
            singles.addAll(_singleIPBlocklist);
        }
        if (singles.size() > 0) {
            buf.append("<table><tr><td><b>Transient IPs</b></td></tr>");
            out.write("<table><tr><td><b>Transient IPs</b></td></tr>");
            for (Iterator iter = singles.iterator(); iter.hasNext(); ) {
                int ip = ((Integer) iter.next()).intValue();
                buf.append("<tr><td align=right>").append(toStr(ip)).append("</td></tr>\n");
                out.write("<tr><td align=right>"); out.write(toStr(ip)); out.write("</td></tr>\n");
            }
            buf.append("</table>");
            out.write("</table>");
        }
        if (_blocklistSize > 0) {
            buf.append("<table><tr><td align=center colspan=2><b>IPs from Blocklist File</b></td></tr><tr><td align=center><b>From</b></td><td align=center><b>To</b></td></tr>");
            out.write("<table><tr><td align=center colspan=2><b>IPs from Blocklist File</b></td></tr><tr><td align=center><b>From</b></td><td align=center><b>To</b></td></tr>");
            for (int i = 0; i < _blocklistSize; i++) {
                int from = getFrom(_blocklist[i]);
                buf.append("<tr><td align=right>").append(toStr(from)).append("</td><td align=right>");
                out.write("<tr><td align=right>"); out.write(toStr(from)); out.write("</td><td align=right>");
                int to = getTo(_blocklist[i]);
                if (to != from)
                    buf.append(toStr(to)).append("</td></tr>\n");
                else
                    buf.append(" </td></tr>\n");
                if (to != from) {
                    out.write(toStr(to)); out.write("</td></tr>\n");
                } else
                    out.write(" </td></tr>\n");
            }
            buf.append("</table>");
            out.write("</table>");
        } else {
            buf.append("<br>No blocklist file entries");
            out.write("<br>No blocklist file entries");
        }
        out.write(buf.toString());
        out.flush();
    }
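
The rewrite above streams each table row straight to the supplied Writer instead of accumulating everything in a StringBuffer, so memory use stays constant however large the blocklist grows. A minimal sketch of the streaming pattern, with hypothetical names:

    import java.io.IOException;
    import java.io.Writer;
    import java.util.List;

    class StreamingRenderer {
        /** Write one row at a time; memory use is constant regardless of list size. */
        static void renderRows(Writer out, List<String> rows) throws IOException {
            out.write("<table>");
            for (String row : rows) {
                out.write("<tr><td>");
                out.write(row);          // no intermediate buffer holding the full table
                out.write("</td></tr>\n");
            }
            out.write("</table>");
            out.flush();
        }
    }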

@@ -72,6 +72,7 @@ public abstract class ClientManagerFacade implements Service {
    public abstract void messageReceived(ClientMessage msg);

    public boolean verifyClientLiveliness() { return true; }
    public boolean isAlive() { return true; }
    /**
     * Does the specified client want their leaseSet published?
     */

@@ -10,6 +10,7 @@ package net.i2p.router;

import java.io.IOException;
import java.io.Writer;
import java.util.Set;

import net.i2p.data.Destination;
import net.i2p.data.Hash;

@@ -44,6 +45,7 @@ class DummyTunnelManagerFacade implements TunnelManagerFacade {
    public void setInboundSettings(Hash client, TunnelPoolSettings settings) {}
    public void setOutboundSettings(Hash client, TunnelPoolSettings settings) {}
    public int getInboundBuildQueueSize() { return 0; }
    public Set<Hash> selectPeersInTooManyTunnels() { return null; }

    public void renderStatusHTML(Writer out) throws IOException {}
    public void restart() {}

@@ -65,7 +65,6 @@ public class Router {
    private I2PThread.OOMEventListener _oomListener;
    private ShutdownHook _shutdownHook;
    private I2PThread _gracefulShutdownDetector;
    private Set _shutdownTasks;

    public final static String PROP_CONFIG_FILE = "router.configLocation";

@@ -91,8 +90,6 @@ public class Router {
        System.setProperty("sun.net.inetaddr.negative.ttl", DNS_CACHE_TIME);
        System.setProperty("networkaddress.cache.ttl", DNS_CACHE_TIME);
        System.setProperty("networkaddress.cache.negative.ttl", DNS_CACHE_TIME);
        // until we handle restricted routes and/or all peers support v6, try v4 first
        System.setProperty("java.net.preferIPv4Stack", "true");
        System.setProperty("http.agent", "I2P");
        // (no need for keepalive)
        System.setProperty("http.keepAlive", "false");

@@ -136,7 +133,9 @@ public class Router {
                envProps.setProperty(k, v);
            }
        }

        // This doesn't work, guess it has to be in the static block above?
        // if (Boolean.valueOf(envProps.getProperty("router.disableIPv6")).booleanValue())
        //     System.setProperty("java.net.preferIPv4Stack", "true");

        _context = new RouterContext(this, envProps);
        _routerInfo = null;

@@ -171,7 +170,6 @@ public class Router {
        watchdog.setDaemon(true);
        watchdog.start();

        _shutdownTasks = new HashSet(0);
    }

    /**

@@ -446,13 +444,14 @@ public class Router {
     */
    private static final String _rebuildFiles[] = new String[] { "router.info",
                                                                 "router.keys",
                                                                 "netDb/my.info",
                                                                 "connectionTag.keys",
                                                                 "netDb/my.info", // no longer used
                                                                 "connectionTag.keys", // never used?
                                                                 "keyBackup/privateEncryption.key",
                                                                 "keyBackup/privateSigning.key",
                                                                 "keyBackup/publicEncryption.key",
                                                                 "keyBackup/publicSigning.key",
                                                                 "sessionKeys.dat" };
                                                                 "sessionKeys.dat" // no longer used
                                                               };

    static final String IDENTLOG = "identlog.txt";
    public static void killKeys() {

@@ -490,13 +489,12 @@ public class Router {
     */
    public void rebuildNewIdentity() {
        killKeys();
        try {
            for (Iterator iter = _shutdownTasks.iterator(); iter.hasNext(); ) {
                Runnable task = (Runnable)iter.next();
        for (Runnable task : _context.getShutdownTasks()) {
            try {
                task.run();
            } catch (Throwable t) {
                _log.log(Log.CRIT, "Error running shutdown task", t);
            }
        } catch (Throwable t) {
            _log.log(Log.CRIT, "Error running shutdown task", t);
        }
        // hard and ugly
        finalShutdown(EXIT_HARD_RESTART);

@@ -781,12 +779,6 @@ public class Router {
        buf.setLength(0);
    }

    public void addShutdownTask(Runnable task) {
        synchronized (_shutdownTasks) {
            _shutdownTasks.add(task);
        }
    }

    public static final int EXIT_GRACEFUL = 2;
    public static final int EXIT_HARD = 3;
    public static final int EXIT_OOM = 10;

@@ -799,13 +791,12 @@ public class Router {
        I2PThread.removeOOMEventListener(_oomListener);
        // Run the shutdown hooks first in case they want to send some goodbye messages
        // Maybe we need a delay after this too?
        try {
            for (Iterator iter = _shutdownTasks.iterator(); iter.hasNext(); ) {
                Runnable task = (Runnable)iter.next();
        for (Runnable task : _context.getShutdownTasks()) {
            try {
                task.run();
            } catch (Throwable t) {
                _log.log(Log.CRIT, "Error running shutdown task", t);
            }
        } catch (Throwable t) {
            _log.log(Log.CRIT, "Error running shutdown task", t);
        }
        try { _context.clientManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the client manager", t); }
        try { _context.jobQueue().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the job queue", t); }
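
Both shutdown paths above now iterate the tasks registered via RouterContext.getShutdownTasks() and wrap each run() in its own try/catch, so one misbehaving hook cannot prevent the remaining hooks, or the shutdown itself, from completing. A minimal sketch of that isolation pattern, names hypothetical:

    import java.util.Set;

    class ShutdownRunner {
        /** Run every registered task; a Throwable from one task must not skip the others. */
        static void runAll(Set<Runnable> tasks) {
            for (Runnable task : tasks) {
                try {
                    task.run();
                } catch (Throwable t) {
                    // log and keep going -- shutdown must complete even if a hook fails
                    System.err.println("Error running shutdown task: " + t);
                }
            }
        }
    }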

@@ -859,6 +850,10 @@ public class Router {
    public void shutdownGracefully() {
        shutdownGracefully(EXIT_GRACEFUL);
    }
    /**
     * Call this with EXIT_HARD or EXIT_HARD_RESTART for a non-blocking,
     * hard, non-graceful shutdown with a brief delay to allow a UI response
     */
    public void shutdownGracefully(int exitCode) {
        _gracefulExitCode = exitCode;
        _config.setProperty(PROP_SHUTDOWN_IN_PROGRESS, "true");

@@ -887,7 +882,9 @@ public class Router {
    }
    /** How long until the graceful shutdown will kill us? */
    public long getShutdownTimeRemaining() {
        if (_gracefulExitCode <= 0) return -1;
        if (_gracefulExitCode <= 0) return -1; // maybe Long.MAX_VALUE would be better?
        if (_gracefulExitCode == EXIT_HARD || _gracefulExitCode == EXIT_HARD_RESTART)
            return 0;
        long exp = _context.tunnelManager().getLastParticipatingExpiration();
        if (exp < 0)
            return -1;

@@ -906,9 +903,20 @@ public class Router {
        while (true) {
            boolean shutdown = (null != _config.getProperty(PROP_SHUTDOWN_IN_PROGRESS));
            if (shutdown) {
                if (_context.tunnelManager().getParticipatingCount() <= 0) {
                    if (_log.shouldLog(Log.CRIT))
                if (_gracefulExitCode == EXIT_HARD || _gracefulExitCode == EXIT_HARD_RESTART ||
                    _context.tunnelManager().getParticipatingCount() <= 0) {
                    if (_gracefulExitCode == EXIT_HARD)
                        _log.log(Log.CRIT, "Shutting down after a brief delay");
                    else if (_gracefulExitCode == EXIT_HARD_RESTART)
                        _log.log(Log.CRIT, "Restarting after a brief delay");
                    else
                        _log.log(Log.CRIT, "Graceful shutdown progress - no more tunnels, safe to die");
                    // Allow time for a UI response
                    try {
                        synchronized (Thread.currentThread()) {
                            Thread.currentThread().wait(2*1000);
                        }
                    } catch (InterruptedException ie) {}
                    shutdown(_gracefulExitCode);
                    return;
                } else {

@@ -1198,13 +1206,13 @@ public class Router {
        return Math.max(send, recv);
    }

}
/* following classes are now private static inner classes, didn't bother to reindent */

/**
 * coalesce the stats framework every minute
 *
 */
class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
private static class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
    private RouterContext _ctx;
    public CoalesceStatsEvent(RouterContext ctx) {
        _ctx = ctx;

@@ -1270,7 +1278,7 @@ class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
 * This is done here because we want to make sure the key is updated before anyone
 * uses it.
 */
class UpdateRoutingKeyModifierJob extends JobImpl {
private static class UpdateRoutingKeyModifierJob extends JobImpl {
    private Log _log;
    private Calendar _cal = new GregorianCalendar(TimeZone.getTimeZone("GMT"));
    public UpdateRoutingKeyModifierJob(RouterContext ctx) {

@@ -1302,7 +1310,7 @@ class UpdateRoutingKeyModifierJob extends JobImpl {
    }
}

class MarkLiveliness implements Runnable {
private static class MarkLiveliness implements Runnable {
    private RouterContext _context;
    private Router _router;
    private File _pingFile;

@@ -1334,7 +1342,7 @@ class MarkLiveliness implements Runnable {
    }
}

class ShutdownHook extends Thread {
private static class ShutdownHook extends Thread {
    private RouterContext _context;
    private static int __id = 0;
    private int _id;

@@ -1351,7 +1359,7 @@ class ShutdownHook extends Thread {
}

/** update the router.info file whenever its, er, updated */
class PersistRouterInfoJob extends JobImpl {
private static class PersistRouterInfoJob extends JobImpl {
    private Log _log;
    public PersistRouterInfoJob(RouterContext ctx) {
        super(ctx);

@@ -1381,3 +1389,5 @@ class PersistRouterInfoJob extends JobImpl {
    }
}
}

}

@@ -12,13 +12,10 @@ import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.router.peermanager.Calculator;
import net.i2p.router.peermanager.CapacityCalculator;
import net.i2p.router.peermanager.IntegrationCalculator;
import net.i2p.router.peermanager.IsFailingCalculator;
import net.i2p.router.peermanager.PeerManagerFacadeImpl;
import net.i2p.router.peermanager.ProfileManagerImpl;
import net.i2p.router.peermanager.ProfileOrganizer;
import net.i2p.router.peermanager.ReliabilityCalculator;
import net.i2p.router.peermanager.SpeedCalculator;
import net.i2p.router.peermanager.StrictSpeedCalculator;
import net.i2p.router.transport.CommSystemFacadeImpl;
import net.i2p.router.transport.FIFOBandwidthLimiter;
import net.i2p.router.transport.OutboundMessageRegistry;

@@ -62,12 +59,9 @@ public class RouterContext extends I2PAppContext {
    private MessageStateMonitor _messageStateMonitor;
    private RouterThrottle _throttle;
    private RouterClock _clock;
    private Calculator _isFailingCalc;
    private Calculator _integrationCalc;
    private Calculator _speedCalc;
    private Calculator _reliabilityCalc;
    private Calculator _capacityCalc;
    private Calculator _oldSpeedCalc;


    private static List _contexts = new ArrayList(1);

@@ -132,11 +126,8 @@ public class RouterContext extends I2PAppContext {
        _messageValidator = new MessageValidator(this);
        //_throttle = new RouterThrottleImpl(this);
        _throttle = new RouterDoSThrottle(this);
        _isFailingCalc = new IsFailingCalculator(this);
        _integrationCalc = new IntegrationCalculator(this);
        _speedCalc = new SpeedCalculator(this);
        _oldSpeedCalc = new StrictSpeedCalculator(this);
        _reliabilityCalc = new ReliabilityCalculator(this);
        _capacityCalc = new CapacityCalculator(this);
    }

@@ -264,15 +255,10 @@ public class RouterContext extends I2PAppContext {
     */
    public RouterThrottle throttle() { return _throttle; }

    /** how do we rank the failure of profiles? */
    public Calculator isFailingCalculator() { return _isFailingCalc; }
    /** how do we rank the integration of profiles? */
    public Calculator integrationCalculator() { return _integrationCalc; }
    /** how do we rank the speed of profiles? */
    public Calculator speedCalculator() { return _speedCalc; }
    public Calculator oldSpeedCalculator() { return _oldSpeedCalc; }
    /** how do we rank the reliability of profiles? */
    public Calculator reliabilityCalculator() { return _reliabilityCalc; }
    /** how do we rank the capacity of profiles? */
    public Calculator capacityCalculator() { return _capacityCalc; }

@@ -298,10 +284,8 @@ public class RouterContext extends I2PAppContext {
        buf.append(_statPublisher).append('\n');
        buf.append(_shitlist).append('\n');
        buf.append(_messageValidator).append('\n');
        buf.append(_isFailingCalc).append('\n');
        buf.append(_integrationCalc).append('\n');
        buf.append(_speedCalc).append('\n');
        buf.append(_reliabilityCalc).append('\n');
        return buf.toString();
    }

@@ -17,7 +17,7 @@ import net.i2p.CoreVersion;
public class RouterVersion {
    public final static String ID = "$Revision: 1.548 $ $Date: 2008-06-07 23:00:00 $";
    public final static String VERSION = CoreVersion.VERSION;
    public final static long BUILD = 5;
    public final static long BUILD = 21;
    public static void main(String args[]) {
        System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
        System.out.println("Router ID: " + RouterVersion.ID);

@@ -29,12 +29,9 @@ public class StatisticsManager implements Service {
    private Log _log;
    private RouterContext _context;
    private boolean _includePeerRankings;
    private int _publishedStats;

    public final static String PROP_PUBLISH_RANKINGS = "router.publishPeerRankings";
    public final static String DEFAULT_PROP_PUBLISH_RANKINGS = "true";
    public final static String PROP_MAX_PUBLISHED_PEERS = "router.publishPeerMax";
    public final static int DEFAULT_MAX_PUBLISHED_PEERS = 10;

    private final DecimalFormat _fmt;
    private final DecimalFormat _pct;

@@ -52,45 +49,12 @@ public class StatisticsManager implements Service {
        startup();
    }
    public void startup() {
        String val = _context.router().getConfigSetting(PROP_PUBLISH_RANKINGS);
        try {
            if (val == null) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Peer publishing setting " + PROP_PUBLISH_RANKINGS
                              + " not set - using default " + DEFAULT_PROP_PUBLISH_RANKINGS);
                val = DEFAULT_PROP_PUBLISH_RANKINGS;
            } else {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Peer publishing setting " + PROP_PUBLISH_RANKINGS
                              + " set to " + val);
            }
            boolean v = Boolean.TRUE.toString().equalsIgnoreCase(val);
            _includePeerRankings = v;
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Setting includePeerRankings = " + v);
        } catch (Throwable t) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Error determining whether to publish rankings ["
                           + PROP_PUBLISH_RANKINGS + "=" + val
                           + "], so we're defaulting to FALSE");
            _includePeerRankings = false;
        }
        val = _context.router().getConfigSetting(PROP_MAX_PUBLISHED_PEERS);
        if (val == null) {
            _publishedStats = DEFAULT_MAX_PUBLISHED_PEERS;
        } else {
            try {
                int num = Integer.parseInt(val);
                _publishedStats = num;
            } catch (NumberFormatException nfe) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Invalid max number of peers to publish [" + val
                               + "], defaulting to " + DEFAULT_MAX_PUBLISHED_PEERS, nfe);
                _publishedStats = DEFAULT_MAX_PUBLISHED_PEERS;
            }
        }
        String val = _context.getProperty(PROP_PUBLISH_RANKINGS, DEFAULT_PROP_PUBLISH_RANKINGS);
        _includePeerRankings = Boolean.valueOf(val);
    }

    static final boolean CommentOutIn072 = RouterVersion.VERSION.equals("0.7.1");

    /** Retrieve a snapshot of the statistics that should be published */
    public Properties publishStatistics() {
        Properties stats = new Properties();

@@ -124,9 +88,6 @@ public class StatisticsManager implements Service {
        ***/

        if (_includePeerRankings) {
            if (false)
                stats.putAll(_context.profileManager().summarizePeers(_publishedStats));

            long publishedUptime = _context.router().getUptime();
            // Don't publish these for first hour
            if (publishedUptime > 60*60*1000)

@@ -172,12 +133,16 @@ public class StatisticsManager implements Service {
            //includeRate("stream.con.sendDuplicateSize", stats, new long[] { 60*60*1000 });
            //includeRate("stream.con.receiveDuplicateSize", stats, new long[] { 60*60*1000 });

            // Round smaller uptimes to 1 hour, to frustrate uptime tracking
            // Round 2nd hour to 90m since peers use 2h minimum to route
            if (publishedUptime < 60*60*1000) publishedUptime = 60*60*1000;
            else if (publishedUptime < 2*60*60*1000) publishedUptime = 90*60*1000;

            stats.setProperty("stat_uptime", DataHelper.formatDuration(publishedUptime));
            if (CommentOutIn072) {
                // Round smaller uptimes to 1 hour, to frustrate uptime tracking
                // Round 2nd hour to 90m since peers use 2h minimum to route
                if (publishedUptime < 60*60*1000) publishedUptime = 60*60*1000;
                else if (publishedUptime < 2*60*60*1000) publishedUptime = 90*60*1000;
                stats.setProperty("stat_uptime", DataHelper.formatDuration(publishedUptime));
            } else {
                // So that we will still get build requests
                stats.setProperty("stat_uptime", "90m");
            }
            //stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");

            //includeRate("tunnel.decryptRequestTime", stats, new long[] { 60*1000, 10*60*1000 });
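
To make the uptime rounding above concrete: while the version is 0.7.1 (CommentOutIn072), 37 minutes of uptime publishes as 1h and 95 minutes as 90m, with two hours and up published unrounded; on any other version a fixed "90m" is published. A sketch of the rounding rule, thresholds copied from the hunk:

    class UptimeRounder {
        /** Sketch of the rule above; values are in milliseconds. */
        static long roundUptime(long uptimeMs) {
            if (uptimeMs < 60*60*1000L)
                return 60*60*1000L;      // under 1h publishes as exactly 1h
            if (uptimeMs < 2*60*60*1000L)
                return 90*60*1000L;      // 1h..2h publishes as 90m
            return uptimeMs;             // 2h and up is published as-is
        }
    }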

@@ -185,12 +150,13 @@ public class StatisticsManager implements Service {
            //includeRate("udp.packetVerifyTime", stats, new long[] { 60*1000 });

            //includeRate("tunnel.buildRequestTime", stats, new long[] { 10*60*1000 });
            includeRate("tunnel.buildClientExpire", stats, new long[] { 10*60*1000 });
            includeRate("tunnel.buildClientReject", stats, new long[] { 10*60*1000 });
            includeRate("tunnel.buildClientSuccess", stats, new long[] { 10*60*1000 });
            includeRate("tunnel.buildExploratoryExpire", stats, new long[] { 10*60*1000 });
            includeRate("tunnel.buildExploratoryReject", stats, new long[] { 10*60*1000 });
            includeRate("tunnel.buildExploratorySuccess", stats, new long[] { 10*60*1000 });
            long rate = CommentOutIn072 ? 10*60*1000 : 60*60*1000;
            includeRate("tunnel.buildClientExpire", stats, new long[] { rate });
            includeRate("tunnel.buildClientReject", stats, new long[] { rate });
            includeRate("tunnel.buildClientSuccess", stats, new long[] { rate });
            includeRate("tunnel.buildExploratoryExpire", stats, new long[] { rate });
            includeRate("tunnel.buildExploratoryReject", stats, new long[] { rate });
            includeRate("tunnel.buildExploratorySuccess", stats, new long[] { rate });
            //includeRate("tunnel.rejectTimeout", stats, new long[] { 10*60*1000 });
            //includeRate("tunnel.rejectOverloaded", stats, new long[] { 10*60*1000 });
            //includeRate("tunnel.acceptLoad", stats, new long[] { 10*60*1000 });

@@ -10,6 +10,7 @@ package net.i2p.router;

import java.io.IOException;
import java.io.Writer;
import java.util.Set;

import net.i2p.data.Destination;
import net.i2p.data.Hash;

@@ -62,6 +63,9 @@ public interface TunnelManagerFacade extends Service {
    /** count how many inbound tunnel requests we have received but not yet processed */
    public int getInboundBuildQueueSize();

    /** @return Set of peers that should not be allowed to be in another tunnel */
    public Set<Hash> selectPeersInTooManyTunnels();

    /**
     * the client connected (or updated their settings), so make sure we have
     * the tunnels for them, and whenever necessary, ask them to authorize

@@ -31,7 +31,7 @@ public class ClientListenerRunner implements Runnable {
    private int _port;
    private boolean _bindAllInterfaces;
    private boolean _running;
    private long _nextFailDelay = 1000;
    private boolean _listening;

    public static final String BIND_ALL_INTERFACES = "i2cp.tcp.bindAllInterfaces";

@@ -41,6 +41,7 @@ public class ClientListenerRunner implements Runnable {
        _manager = manager;
        _port = port;
        _running = false;
        _listening = false;

        String val = context.getProperty(BIND_ALL_INTERFACES, "False");
        _bindAllInterfaces = Boolean.valueOf(val).booleanValue();

@@ -48,6 +49,7 @@ public class ClientListenerRunner implements Runnable {

    public void setPort(int port) { _port = port; }
    public int getPort() { return _port; }
    public boolean isListening() { return _running && _listening; }

    /**
     * Start up the socket listener, listens for connections, and

@@ -58,7 +60,7 @@ public class ClientListenerRunner implements Runnable {
     */
    public void runServer() {
        _running = true;
        int curDelay = 0;
        int curDelay = 1000;
        while (_running) {
            try {
                if (_bindAllInterfaces) {

@@ -77,7 +79,8 @@ public class ClientListenerRunner implements Runnable {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("ServerSocket created, before accept: " + _socket);

                curDelay = 0;
                curDelay = 1000;
                _listening = true;
                while (_running) {
                    try {
                        Socket socket = _socket.accept();

@@ -96,6 +99,7 @@ public class ClientListenerRunner implements Runnable {
                    } catch (Throwable t) {
                        if (_context.router().isAlive())
                            _log.error("Fatal error running client listener - killing the thread!", t);
                        _listening = false;
                        return;
                    }
                }

@@ -104,6 +108,7 @@ public class ClientListenerRunner implements Runnable {
                    _log.error("Error listening on port " + _port, ioe);
            }

            _listening = false;
            if (_socket != null) {
                try { _socket.close(); } catch (IOException ioe) {}
                _socket = null;

@@ -111,14 +116,16 @@ public class ClientListenerRunner implements Runnable {

            if (!_context.router().isAlive()) break;

            _log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again");
            try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {}
            curDelay += _nextFailDelay;
            _nextFailDelay *= 5;
            if (curDelay < 60*1000)
                _log.error("Error listening, waiting " + (curDelay/1000) + "s before we try again");
            else
                _log.log(Log.CRIT, "I2CP error listening to port " + _port + " - is another I2P instance running? Resolve conflicts and restart");
            try { Thread.sleep(curDelay); } catch (InterruptedException ie) {}
            curDelay = Math.min(curDelay*3, 60*1000);
        }

        if (_context.router().isAlive())
            _log.error("CANCELING I2CP LISTEN. delay = " + curDelay, new Exception("I2CP Listen cancelled!!!"));
            _log.error("CANCELING I2CP LISTEN", new Exception("I2CP Listen cancelled!!!"));
        _running = false;
    }
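
The retry loop above drops the unbounded _nextFailDelay *= 5 growth in favor of a delay that starts at one second, triples on each failure, and is capped at one minute, with the log escalating to CRIT once the cap is hit. A minimal sketch of the backoff schedule, with a hypothetical tryBind() standing in for the ServerSocket setup:

    class BindRetry {
        /** Delays: 1s, 3s, 9s, 27s, 60s, 60s, ... -- capped rather than growing forever. */
        static void retryLoop() throws InterruptedException {
            int curDelay = 1000;
            while (!tryBind()) {
                Thread.sleep(curDelay);
                curDelay = Math.min(curDelay * 3, 60 * 1000);
            }
        }

        /** Hypothetical stand-in for the real bind attempt in runServer(). */
        static boolean tryBind() { return true; }
    }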

@@ -108,6 +108,8 @@ public class ClientManager {
        }
    }

    public boolean isAlive() { return _listener.isListening(); }

    public void registerConnection(ClientConnectionRunner runner) {
        synchronized (_pendingRunners) {
            _pendingRunners.add(runner);

@@ -74,6 +74,8 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
        startup();
    }

    public boolean isAlive() { return _manager != null && _manager.isAlive(); }

    private static final long MAX_TIME_TO_REBUILD = 10*60*1000;
    public boolean verifyClientLiveliness() {
        if (_manager == null) return true;

@@ -11,10 +11,12 @@ package net.i2p.router.client;
import java.util.Properties;

import net.i2p.data.Payload;
import net.i2p.data.i2cp.BandwidthLimitsMessage;
import net.i2p.data.i2cp.CreateLeaseSetMessage;
import net.i2p.data.i2cp.CreateSessionMessage;
import net.i2p.data.i2cp.DestLookupMessage;
import net.i2p.data.i2cp.DestroySessionMessage;
import net.i2p.data.i2cp.GetBandwidthLimitsMessage;
import net.i2p.data.i2cp.GetDateMessage;
import net.i2p.data.i2cp.I2CPMessage;
import net.i2p.data.i2cp.I2CPMessageException;

@@ -93,6 +95,9 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
            case ReconfigureSessionMessage.MESSAGE_TYPE:
                handleReconfigureSession(reader, (ReconfigureSessionMessage)message);
                break;
            case GetBandwidthLimitsMessage.MESSAGE_TYPE:
                handleGetBWLimits(reader, (GetBandwidthLimitsMessage)message);
                break;
            default:
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Unhandled I2CP type received: " + message.getType());

@@ -274,6 +279,24 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
        }
    }

    /**
     * Divide router limit by 1.75 for overhead.
     * This could someday give a different answer to each client.
     * But it's not enforced anywhere.
     */
    private void handleGetBWLimits(I2CPMessageReader reader, GetBandwidthLimitsMessage message) {
        if (_log.shouldLog(Log.INFO))
            _log.info("Got BW Limits request");
        int in = _context.bandwidthLimiter().getInboundKBytesPerSecond() * 4 / 7;
        int out = _context.bandwidthLimiter().getOutboundKBytesPerSecond() * 4 / 7;
        BandwidthLimitsMessage msg = new BandwidthLimitsMessage(in, out);
        try {
            _runner.doSend(msg);
        } catch (I2CPMessageException ime) {
            _log.error("Error writing out the session status message", ime);
        }
    }

    // this *should* be mod 65536, but UnsignedInteger is still b0rked. FIXME
    private final static int MAX_SESSION_ID = 32767;
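
The 4/7 factor in handleGetBWLimits above is the javadoc's "divide by 1.75" done in integer math, since 1/1.75 is exactly 4/7: a 350 KBps router limit advertises 350 * 4 / 7 = 200 KBps to the client, and 100 KBps advertises 57. A one-line sketch of the arithmetic:

    class BWLimitMath {
        /** limit / 1.75 == limit * 4 / 7; e.g. 350 -> 200, 100 -> 57 (truncated). */
        static int clientLimit(int routerKBps) {
            return routerKBps * 4 / 7;
        }
    }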

@@ -34,6 +34,8 @@ import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;

/**
 * Send a client message out a random outbound tunnel and into a random inbound

@@ -98,6 +100,11 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
     */
    private static final int BUNDLE_PROBABILITY_DEFAULT = 100;

    private static final Object _initializeLock = new Object();
    private static boolean _initialized = false;
    private static final int CLEAN_INTERVAL = 5*60*1000;
    private static final int REPLY_REQUEST_INTERVAL = 60*1000;

    /**
     * Send the sucker
     */

@@ -105,20 +112,26 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
        super(ctx);
        _log = ctx.logManager().getLog(OutboundClientMessageOneShotJob.class);

        ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.sendAckTime", "Message round trip time", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.leaseSetFoundLocally", "How often we tried to look for a leaseSet and found it locally?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look for a remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.leaseSetFailedRemoteTime", "How long we tried to look for a remote leaseSet (when we failed)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.dispatchPrepareTime", "How long until we've queued up the dispatch job (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.dispatchTime", "How long until we've dispatched the message (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.dispatchSendTime", "How long the actual dispatching takes?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.dispatchNoTunnels", "How long after start do we run out of tunnels to send/receive with?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("client.dispatchNoACK", "Repeated message sends to a peer (no ack required)", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l });
        synchronized (_initializeLock) {
            if (!_initialized) {
                SimpleScheduler.getInstance().addPeriodicEvent(new OCMOSJCacheCleaner(ctx), CLEAN_INTERVAL, CLEAN_INTERVAL);
                ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.sendAckTime", "Message round trip time", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.leaseSetFoundLocally", "How often we tried to look for a leaseSet and found it locally?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look for a remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.leaseSetFailedRemoteTime", "How long we tried to look for a remote leaseSet (when we failed)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.dispatchPrepareTime", "How long until we've queued up the dispatch job (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.dispatchTime", "How long until we've dispatched the message (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.dispatchSendTime", "How long the actual dispatching takes?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.dispatchNoTunnels", "How long after start do we run out of tunnels to send/receive with?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
                ctx.statManager().createRateStat("client.dispatchNoACK", "Repeated message sends to a peer (no ack required)", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l });
                _initialized = true;
            }
        }
        long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;
        _clientMessage = msg;
        _clientMessageId = msg.getMessageId();
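
The constructor above now performs the stat registration and cache-cleaner scheduling exactly once per JVM, however many jobs are created, by guarding the work with a boolean flag under a static lock. A minimal sketch of the once-only initialization idiom, names hypothetical:

    class OnceOnlyInit {
        private static final Object _lock = new Object();
        private static boolean _done = false;

        OnceOnlyInit() {
            synchronized (_lock) {
                if (!_done) {
                    // expensive one-time setup: stat registration, periodic timers, ...
                    _done = true;   // later constructors skip the block entirely
                }
            }
        }
    }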

@@ -200,8 +213,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
     *
     * Key the cache on the source+dest pair.
     */
    private static HashMap _leaseSetCache = new HashMap();
    private static long _lscleanTime = 0;
    private static HashMap<String, LeaseSet> _leaseSetCache = new HashMap();
    private LeaseSet getReplyLeaseSet(boolean force) {
        LeaseSet newLS = getContext().netDb().lookupLeaseSetLocally(_from.calculateHash());
        if (newLS == null)

@@ -235,12 +247,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
        // If the last leaseSet we sent him is still good, don't bother sending again
        long now = getContext().clock().now();
        synchronized (_leaseSetCache) {
            if (now - _lscleanTime > 5*60*1000) { // clean out periodically
                cleanLeaseSetCache(_leaseSetCache);
                _lscleanTime = now;
            }
            if (!force) {
                LeaseSet ls = (LeaseSet) _leaseSetCache.get(hashPair());
                LeaseSet ls = _leaseSetCache.get(hashPair());
                if (ls != null) {
                    if (ls.equals(newLS)) {
                        // still good, send it 10% of the time

@@ -305,8 +313,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
     * lease).
     *
     */
    private static HashMap _leaseCache = new HashMap();
    private static long _lcleanTime = 0;
    private static HashMap<String, Lease> _leaseCache = new HashMap();
    private boolean getNextLease() {
        _leaseSet = getContext().netDb().lookupLeaseSetLocally(_to.calculateHash());
        if (_leaseSet == null) {

@@ -319,11 +326,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
        // Use the same lease if it's still good
        // Even if _leaseSet changed, _leaseSet.getEncryptionKey() didn't...
        synchronized (_leaseCache) {
            if (now - _lcleanTime > 5*60*1000) { // clean out periodically
                cleanLeaseCache(_leaseCache);
                _lcleanTime = now;
            }
            _lease = (Lease) _leaseCache.get(hashPair());
            _lease = _leaseCache.get(hashPair());
            if (_lease != null) {
                // if outbound tunnel length == 0 && lease.firsthop.isBacklogged() don't use it ??
                if (!_lease.isExpired(Router.CLOCK_FUDGE_FACTOR)) {

@@ -444,6 +447,14 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
        }
    }

    /**
     * This cache is used to ensure that we request a reply every so often.
     * Hopefully this allows the router to recognize a failed tunnel and switch,
     * before upper layers like streaming lib fail, even for low-bandwidth
     * connections like IRC.
     */
    private static HashMap<String, Long> _lastReplyRequestCache = new HashMap();

    /**
     * Send the message to the specified tunnel by creating a new garlic message containing
     * the (already created) payload clove as well as a new delivery status message. This garlic

@@ -454,18 +465,27 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
     */
    private void send() {
        if (_finished) return;
        if (getContext().clock().now() >= _overallExpiration) {
        long now = getContext().clock().now();
        if (now >= _overallExpiration) {
            dieFatal();
            return;
        }

        int existingTags = GarlicMessageBuilder.estimateAvailableTags(getContext(), _leaseSet.getEncryptionKey());
        _outTunnel = selectOutboundTunnel(_to);
        // boolean wantACK = _wantACK || existingTags <= 30 || getContext().random().nextInt(100) < 5;
        // what's the point of 5% random? possible improvements or replacements:
        // - wantACK if we changed their inbound lease (getNextLease() sets _wantACK)
        // - wantACK if we changed our outbound tunnel (selectOutboundTunnel() sets _wantACK)
        // - wantACK if we haven't in last 1m (requires a new static cache probably)
        boolean wantACK = _wantACK || existingTags <= 30 || getContext().random().nextInt(100) < 5;
        // DONE (getNextLease() is called before this): wantACK if we changed their inbound lease (getNextLease() sets _wantACK)
        // DONE (selectOutboundTunnel() moved above here): wantACK if we changed our outbound tunnel (selectOutboundTunnel() sets _wantACK)
        // DONE (added new cache): wantACK if we haven't in last 1m (requires a new static cache probably)
        boolean wantACK;
        synchronized (_lastReplyRequestCache) {
            Long lastSent = _lastReplyRequestCache.get(hashPair());
            wantACK = _wantACK || existingTags <= 30 ||
                      lastSent == null || lastSent.longValue() < now - REPLY_REQUEST_INTERVAL;
            if (wantACK)
                _lastReplyRequestCache.put(hashPair(), Long.valueOf(now));
        }
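
The new _lastReplyRequestCache replaces the old 5% random ACK with a deterministic rule: request a reply whenever none has been requested for this source+dest pair within REPLY_REQUEST_INTERVAL (one minute). A minimal sketch of the rate-limit check, names hypothetical:

    import java.util.HashMap;
    import java.util.Map;

    class ReplyRateLimiter {
        private static final long INTERVAL = 60 * 1000L;
        private static final Map<String, Long> _lastRequest = new HashMap<String, Long>();

        /** True at most once per INTERVAL for each key; caller then requests an ACK. */
        static synchronized boolean shouldRequestReply(String key, long now) {
            Long last = _lastRequest.get(key);
            if (last == null || last.longValue() < now - INTERVAL) {
                _lastRequest.put(key, Long.valueOf(now));
                return true;
            }
            return false;
        }
    }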

        PublicKey key = _leaseSet.getEncryptionKey();
        SessionKey sessKey = new SessionKey();

@@ -499,7 +519,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
            // we dont receive the reply? hmm...)
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": Unable to create the garlic message (no tunnels left or too lagged) to " + _toString);
            getContext().statManager().addRateData("client.dispatchNoTunnels", getContext().clock().now() - _start, 0);
            getContext().statManager().addRateData("client.dispatchNoTunnels", now - _start, 0);
            dieFatal();
            return;
        }

@@ -537,12 +557,12 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
        } else {
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": Could not find any outbound tunnels to send the payload through... this might take a while");
            getContext().statManager().addRateData("client.dispatchNoTunnels", getContext().clock().now() - _start, 0);
            getContext().statManager().addRateData("client.dispatchNoTunnels", now - _start, 0);
            dieFatal();
        }
        _clientMessage = null;
        _clove = null;
        getContext().statManager().addRateData("client.dispatchPrepareTime", getContext().clock().now() - _start, 0);
        getContext().statManager().addRateData("client.dispatchPrepareTime", now - _start, 0);
        if (!wantACK)
            getContext().statManager().addRateData("client.dispatchNoACK", 1, 0);
    }

@@ -580,7 +600,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
    /**
     * This is the place where we make I2P go fast.
     *
     * We have four static caches.
     * We have five static caches.
     * - The LeaseSet cache is used to decide whether to bundle our own leaseset,
     *   which minimizes overhead.
     * - The Lease cache is used to persistently send to the same lease for the destination,

@@ -588,6 +608,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
     * - The Tunnel and BackloggedTunnel caches are used to persistently use the same outbound tunnel
     *   for the same destination,
     *   which keeps the streaming lib happy by minimizing out-of-order delivery.
     * - The last reply requested cache ensures that a reply is requested every so often,
     *   so that failed tunnels are recognized.
     *
     */

@@ -607,7 +629,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
     * (needed for cleanTunnelCache)
     * 44 = 32 * 4 / 3
     */
    private Hash sourceFromHashPair(String s) {
    private static Hash sourceFromHashPair(String s) {
        return new Hash(Base64.decode(s.substring(44, 88)));
    }

@@ -627,17 +649,17 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
        }
        if (_lease != null) {
            synchronized(_leaseCache) {
                Lease l = (Lease) _leaseCache.get(key);
                Lease l = _leaseCache.get(key);
                if (l != null && l.equals(_lease))
                    _leaseCache.remove(key);
            }
        }
        if (_outTunnel != null) {
            synchronized(_tunnelCache) {
                TunnelInfo t =(TunnelInfo) _backloggedTunnelCache.get(key);
                TunnelInfo t = _backloggedTunnelCache.get(key);
                if (t != null && t.equals(_outTunnel))
                    _backloggedTunnelCache.remove(key);
                t = (TunnelInfo) _tunnelCache.get(key);
                t = _tunnelCache.get(key);
                if (t != null && t.equals(_outTunnel))
                    _tunnelCache.remove(key);
            }

@@ -648,19 +670,14 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
     * Clean out old leaseSets from a set.
     * Caller must synchronize on tc.
     */
    private void cleanLeaseSetCache(HashMap tc) {
        long now = getContext().clock().now();
        List deleteList = new ArrayList();
    private static void cleanLeaseSetCache(RouterContext ctx, HashMap tc) {
        long now = ctx.clock().now();
        for (Iterator iter = tc.entrySet().iterator(); iter.hasNext(); ) {
            Map.Entry entry = (Map.Entry)iter.next();
            String k = (String) entry.getKey();
            LeaseSet l = (LeaseSet) entry.getValue();
            if (l.getEarliestLeaseDate() < now)
                deleteList.add(k);
        }
        for (Iterator iter = deleteList.iterator(); iter.hasNext(); ) {
            String k = (String) iter.next();
            tc.remove(k);
                iter.remove();
        }
    }

@@ -668,18 +685,13 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
     * Clean out old leases from a set.
     * Caller must synchronize on tc.
     */
    private void cleanLeaseCache(HashMap tc) {
        List deleteList = new ArrayList();
    private static void cleanLeaseCache(HashMap tc) {
        for (Iterator iter = tc.entrySet().iterator(); iter.hasNext(); ) {
            Map.Entry entry = (Map.Entry)iter.next();
            String k = (String) entry.getKey();
            Lease l = (Lease) entry.getValue();
            if (l.isExpired(Router.CLOCK_FUDGE_FACTOR))
                deleteList.add(k);
        }
        for (Iterator iter = deleteList.iterator(); iter.hasNext(); ) {
            String k = (String) iter.next();
            tc.remove(k);
                iter.remove();
        }
    }

@@ -687,18 +699,48 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
     * Clean out old tunnels from a set.
     * Caller must synchronize on tc.
     */
    private void cleanTunnelCache(HashMap tc) {
        List deleteList = new ArrayList();
    private static void cleanTunnelCache(RouterContext ctx, HashMap tc) {
        for (Iterator iter = tc.entrySet().iterator(); iter.hasNext(); ) {
            Map.Entry entry = (Map.Entry)iter.next();
            String k = (String) entry.getKey();
            TunnelInfo tunnel = (TunnelInfo) entry.getValue();
            if (!getContext().tunnelManager().isValidTunnel(sourceFromHashPair(k), tunnel))
                deleteList.add(k);
            if (!ctx.tunnelManager().isValidTunnel(sourceFromHashPair(k), tunnel))
                iter.remove();
        }
        for (Iterator iter = deleteList.iterator(); iter.hasNext(); ) {
            String k = (String) iter.next();
            tc.remove(k);
    }

    /**
     * Clean out old reply times
     * Caller must synchronize on tc.
     */
    private static void cleanReplyCache(RouterContext ctx, HashMap tc) {
        long now = ctx.clock().now();
        for (Iterator iter = tc.values().iterator(); iter.hasNext(); ) {
            Long l = (Long) iter.next();
            if (l.longValue() < now - CLEAN_INTERVAL)
                iter.remove();
        }
    }

    private static class OCMOSJCacheCleaner implements SimpleTimer.TimedEvent {
        private RouterContext _ctx;
        private OCMOSJCacheCleaner(RouterContext ctx) {
            _ctx = ctx;
        }
        public void timeReached() {
            synchronized(_leaseSetCache) {
                cleanLeaseSetCache(_ctx, _leaseSetCache);
            }
            synchronized(_leaseCache) {
                cleanLeaseCache(_leaseCache);
            }
            synchronized(_tunnelCache) {
                cleanTunnelCache(_ctx, _tunnelCache);
                cleanTunnelCache(_ctx, _backloggedTunnelCache);
            }
            synchronized(_lastReplyRequestCache) {
                cleanReplyCache(_ctx, _lastReplyRequestCache);
            }
        }
    }
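
The cache cleaners above drop the old two-pass deleteList approach in favor of Iterator.remove(), which deletes entries safely during iteration with no second pass and no extra allocation. A minimal sketch of the pattern:

    import java.util.Iterator;
    import java.util.Map;

    class CachePruner {
        /** Remove expired entries in one pass; Iterator.remove() avoids ConcurrentModificationException. */
        static void prune(Map<String, Long> cache, long cutoff) {
            for (Iterator<Long> iter = cache.values().iterator(); iter.hasNext(); ) {
                Long stamp = iter.next();
                if (stamp.longValue() < cutoff)
                    iter.remove();   // safe: removes the entry the iterator just returned
            }
        }
    }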

@@ -710,25 +752,19 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
     * Key the caches on the source+dest pair.
     *
     */
    private static HashMap _tunnelCache = new HashMap();
    private static HashMap _backloggedTunnelCache = new HashMap();
    private static long _cleanTime = 0;
    private static HashMap<String, TunnelInfo> _tunnelCache = new HashMap();
    private static HashMap<String, TunnelInfo> _backloggedTunnelCache = new HashMap();
    private TunnelInfo selectOutboundTunnel(Destination to) {
        TunnelInfo tunnel;
        long now = getContext().clock().now();
        synchronized (_tunnelCache) {
            if (now - _cleanTime > 5*60*1000) { // clean out periodically
                cleanTunnelCache(_tunnelCache);
                cleanTunnelCache(_backloggedTunnelCache);
                _cleanTime = now;
            }
            /**
             * If old tunnel is valid and no longer backlogged, use it.
             * This prevents an active anonymity attack, where a peer could tell
             * if you were the originator by backlogging the tunnel, then removing the
             * backlog and seeing if traffic came back or not.
             */
            tunnel = (TunnelInfo) _backloggedTunnelCache.get(hashPair());
            tunnel = _backloggedTunnelCache.get(hashPair());
            if (tunnel != null) {
                if (getContext().tunnelManager().isValidTunnel(_from.calculateHash(), tunnel)) {
                    if (!getContext().commSystem().isBacklogged(tunnel.getPeer(1))) {

@@ -743,7 +779,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
                _backloggedTunnelCache.remove(hashPair());
            }
            // Use the same tunnel unless backlogged
            tunnel = (TunnelInfo) _tunnelCache.get(hashPair());
            tunnel = _tunnelCache.get(hashPair());
            if (tunnel != null) {
                if (getContext().tunnelManager().isValidTunnel(_from.calculateHash(), tunnel)) {
                    if (tunnel.getLength() <= 1 || !getContext().commSystem().isBacklogged(tunnel.getPeer(1)))
@ -1,82 +0,0 @@
package net.i2p.router.peermanager;

import net.i2p.router.RouterContext;
import net.i2p.util.Log;

/**
* Simple boolean calculation to determine whether the given profile is "failing" -
* meaning we shouldn't bother trying to get them to do something. However, if we
* have a specific need to contact them in particular - e.g. instructions in a garlic
* or leaseSet - we will try. The currently implemented algorithm determines that
* a profile is failing if within the last few minutes, they've done something bad: <ul>
* <li>It has a comm error (TCP disconnect, etc) in the last minute or two</li>
* <li>They've failed to respond to a db message in the last minute or two</li>
* <li>They've rejected a tunnel in the last 5 minutes</li>
* <li>They've been unreachable any time in the last 5 minutes</li>
* </ul>
*
*/
public class IsFailingCalculator extends Calculator {
private Log _log;
private RouterContext _context;

/** if they haven't b0rked in the last 2 minutes, they're ok */
private final static long GRACE_PERIOD = 2*60*1000;

public IsFailingCalculator(RouterContext context) {
_context = context;
_log = context.logManager().getLog(IsFailingCalculator.class);
}

public boolean calcBoolean(PeerProfile profile) {
// have we failed in the last 119 seconds?
/*
if ( (profile.getCommError().getRate(60*1000).getCurrentEventCount() > 0) ||
     (profile.getCommError().getRate(60*1000).getLastEventCount() > 0) ||
     (profile.getCommError().getRate(10*60*1000).getCurrentEventCount() > 0) ) {
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Peer " + profile.getPeer().toBase64()
                   + " is failing because it had a comm error recently ");
    return true;
} else {
*/
//if ( (profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getCurrentEventCount() > 0) ||
//     (profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getLastEventCount() > 0) ) {
//    // are they overloaded (or disconnected)?
//    return true;
//}

// this doesn't make sense with probabilistic rejections - we should be
// adequately dampening the capacity so these peers aren't queried

//Rate rejectRate = profile.getTunnelHistory().getRejectionRate().getRate(10*60*1000);
//if (rejectRate.getCurrentEventCount() >= 2) {
//    if (_log.shouldLog(Log.DEBUG))
//        _log.debug("Peer " + profile.getPeer().toBase64()
//                   + " is failing because they rejected some tunnels recently");
//    return true;
//}

////
// the right way to behave would be to use some statistical
// analysis on the failure rate, and only mark the peer as failing
// if their rate exceeded the expected rate (mean, median, stddev, etc)
////

//Rate failedRate = profile.getTunnelHistory().getFailedRate().getRate(60*1000);
//if (failedRate.getCurrentEventCount() >= 2) {
//    if (_log.shouldLog(Log.DEBUG))
//        _log.debug("Peer " + profile.getPeer().toBase64()
//                   + " is failing because too many of their tunnels failed recently");
//    return true;
//}

// if they have rejected us saying they're totally broken anytime in the last
// 10 minutes, don't bother 'em
if (profile.getTunnelHistory().getLastRejectedCritical() > _context.clock().now() - 10*60*1000)
return true;

return false;
//}
}
}
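With this class deleted, nothing computes "failing" any more: later in this diff, PeerProfile.calculateIsFailing() is changed to simply return false. The only rule still live in the deleted class was the critical-rejection window. If that behavior is ever wanted again as a standalone predicate, it could look roughly like this (a sketch, not part of the commit; isFailingNow is a hypothetical name):

// Sketch: the one active rule from the deleted IsFailingCalculator.
// A peer is "failing" only if it critically rejected us in the last 10 minutes.
private static final long CRITICAL_REJECT_WINDOW = 10*60*1000;
boolean isFailingNow(PeerProfile profile, long now) {
    return profile.getTunnelHistory().getLastRejectedCritical() > now - CRITICAL_REJECT_WINDOW;
}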
@ -8,6 +8,21 @@ import net.i2p.router.RouterContext;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;

/**
* Copied from http://www.i2p2.i2p/how_peerselection.html
*
* See also main() below for additional commentary by zzz.
*
* Currently, there is no 'ejection' strategy to get rid of the profiles for peers that
* are no longer active (or when the network consists of thousands of peers, to get rid
* of peers that are performing poorly). However, the size of each profile is fairly small,
* and is unrelated to how much data is collected about the peer, so that a router can
* keep a few thousand active peer profiles before the overhead becomes a serious concern.
* Once it becomes necessary, we can simply compact the poorly performing profiles
* (keeping only the most basic data) and maintain hundreds of thousands of profiles
* in memory. Beyond that size, we can simply eject the peers (e.g. keeping the best 100,000).
*/

public class PeerProfile {
private Log _log;
private RouterContext _context;
@ -22,25 +37,19 @@ public class PeerProfile {
private double _tunnelTestResponseTimeAvg;
// periodic rates
private RateStat _sendSuccessSize = null;
private RateStat _sendFailureSize = null;
private RateStat _receiveSize = null;
private RateStat _dbResponseTime = null;
private RateStat _tunnelCreateResponseTime = null;
private RateStat _tunnelTestResponseTime = null;
private RateStat _tunnelTestResponseTimeSlow = null;
private RateStat _commError = null;
private RateStat _dbIntroduction = null;
// calculation bonuses
private long _speedBonus;
private long _reliabilityBonus;
private long _capacityBonus;
private long _integrationBonus;
// calculation values
private double _speedValue;
private double _reliabilityValue;
private double _capacityValue;
private double _integrationValue;
private double _oldSpeedValue;
private boolean _isFailing;
// good vs bad behavior
private TunnelHistory _tunnelHistory;
@ -57,7 +66,6 @@ public class PeerProfile {
_log = context.logManager().getLog(PeerProfile.class);
_expanded = false;
_speedValue = 0;
_reliabilityValue = 0;
_capacityValue = 0;
_integrationValue = 0;
_isFailing = false;
@ -96,6 +104,11 @@ public class PeerProfile {
* given period?)
* Also mark active if it is connected, as this will tend to encourage use
* of already-connected peers.
*
* Note: this appears to be the only use for these two RateStats.
*
* @param period must be one of the periods in the RateStat constructors below
* (5*60*1000 or 60*60*1000)
*/
public boolean getIsActive(long period) {
if ( (getSendSuccessSize().getRate(period).getCurrentEventCount() > 0) ||
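Per the new javadoc, a peer counts as active if anything was sent or received in the given period, or if it is currently connected. A sketch of the full predicate under those rules — a reconstruction from the javadoc, not necessarily the commit's exact method body (the isEstablished() call is how the comm system reports a live connection):

// Sketch: active = any send/receive events in the period, or currently connected.
public boolean getIsActive(long period) {
    return getSendSuccessSize().getRate(period).getCurrentEventCount() > 0
        || getSendSuccessSize().getRate(period).getLastEventCount() > 0
        || getReceiveSize().getRate(period).getCurrentEventCount() > 0
        || getReceiveSize().getRate(period).getLastEventCount() > 0
        || _context.commSystem().isEstablished(_peer);
}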
@ -139,8 +152,6 @@ public class PeerProfile {

/** how large successfully sent messages are, calculated over a 1 minute, 1 hour, and 1 day period */
public RateStat getSendSuccessSize() { return _sendSuccessSize; }
/** how large messages that could not be sent were, calculated over a 1 minute, 1 hour, and 1 day period */
public RateStat getSendFailureSize() { return _sendFailureSize; }
/** how large received messages are, calculated over a 1 minute, 1 hour, and 1 day period */
public RateStat getReceiveSize() { return _receiveSize; }
/** how long it takes to get a db response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period */
@ -149,10 +160,6 @@ public class PeerProfile {
public RateStat getTunnelCreateResponseTime() { return _tunnelCreateResponseTime; }
/** how long it takes to successfully test a tunnel this peer participates in (in milliseconds), calculated over a 10 minute, 1 hour, and 1 day period */
public RateStat getTunnelTestResponseTime() { return _tunnelTestResponseTime; }
/** how long it takes to successfully test the peer (in milliseconds) when the time exceeds 5s */
public RateStat getTunnelTestResponseTimeSlow() { return _tunnelTestResponseTimeSlow; }
/** how long between communication errors with the peer (disconnection, etc), calculated over a 1 minute, 1 hour, and 1 day period */
public RateStat getCommError() { return _commError; }
/** how many new peers we get from dbSearchReplyMessages or dbStore messages, calculated over a 1 hour, 1 day, and 1 week period */
public RateStat getDbIntroduction() { return _dbIntroduction; }

@ -164,14 +171,6 @@ public class PeerProfile {
public long getSpeedBonus() { return _speedBonus; }
public void setSpeedBonus(long bonus) { _speedBonus = bonus; }

/**
* extra factor added to the reliability ranking - this can be updated in the profile
* written to disk to affect how the algorithm ranks reliability. Negative values are
* penalties
*/
public long getReliabilityBonus() { return _reliabilityBonus; }
public void setReliabilityBonus(long bonus) { _reliabilityBonus = bonus; }

/**
* extra factor added to the capacity ranking - this can be updated in the profile
* written to disk to affect how the algorithm ranks capacity. Negative values are
@ -195,14 +194,6 @@ public class PeerProfile {
*
*/
public double getSpeedValue() { return _speedValue; }
public double getOldSpeedValue() { return _oldSpeedValue; }
/**
* How likely are they to stay up and pass on messages over the next few minutes.
* Positive numbers mean more likely, negative numbers mean it's probably not
* even worth trying.
*
*/
public double getReliabilityValue() { return _reliabilityValue; }
/**
* How many tunnels do we think this peer can handle over the next hour?
*
@ -315,6 +306,11 @@ public class PeerProfile {
}
}
}
/**
* @return the average of the three fastest one-minute data transfers, on a per-tunnel basis,
* through this peer. Ever. Except that the peak values are cut in half
* once a day by coalesceThroughput(). This seems way too seldom.
*/
public double getPeakTunnel1mThroughputKBps() {
double rv = 0;
for (int i = 0; i < THROUGHPUT_COUNT; i++)
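The new javadoc describes getPeakTunnel1mThroughputKBps() as the average of the THROUGHPUT_COUNT (three) best one-minute per-tunnel transfer peaks ever recorded. A sketch of the complete method under that description (the field names match those used elsewhere in this file; the bytes-to-KBps conversion factor is an assumption):

// Sketch: average of the top-3 recorded 1-minute tunnel throughput peaks, in KBps.
public double getPeakTunnel1mThroughputKBps() {
    double rv = 0;
    for (int i = 0; i < THROUGHPUT_COUNT; i++)
        rv += _peakTunnel1mThroughput[i];
    rv /= (60d * 1024d * THROUGHPUT_COUNT);  // bytes-per-minute -> KBytes-per-second, averaged
    return rv;
}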
@ -334,13 +330,10 @@ public class PeerProfile {
*/
public void shrinkProfile() {
_sendSuccessSize = null;
_sendFailureSize = null;
_receiveSize = null;
_dbResponseTime = null;
_tunnelCreateResponseTime = null;
_tunnelTestResponseTime = null;
_tunnelTestResponseTimeSlow = null;
_commError = null;
_dbIntroduction = null;
_tunnelHistory = null;
_dbHistory = null;
@ -358,21 +351,15 @@ public class PeerProfile {
public void expandProfile() {
String group = (null == _peer ? "profileUnknown" : _peer.toBase64().substring(0,6));
if (_sendSuccessSize == null)
_sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
if (_sendFailureSize == null)
_sendFailureSize = new RateStat("sendFailureSize", "How large messages that could not be sent were", group, new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } );
_sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 5*60*1000l, 60*60*1000l });
if (_receiveSize == null)
_receiveSize = new RateStat("receiveSize", "How large received messages are", group, new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000 } );
_receiveSize = new RateStat("receiveSize", "How large received messages are", group, new long[] { 5*60*1000l, 60*60*1000l } );
if (_dbResponseTime == null)
_dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_tunnelCreateResponseTime == null)
_tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_tunnelTestResponseTime == null)
_tunnelTestResponseTime = new RateStat("tunnelTestResponseTime", "how long it takes to successfully test a tunnel this peer participates in (in milliseconds)", group, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 } );
if (_tunnelTestResponseTimeSlow == null)
_tunnelTestResponseTimeSlow = new RateStat("tunnelTestResponseTimeSlow", "how long it takes to successfully test a peer when the time exceeds 5s", group, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l, });
if (_commError == null)
_commError = new RateStat("commErrorRate", "how long between communication errors with the peer (e.g. disconnection)", group, new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_dbIntroduction == null)
_dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 6*60*60*1000l, 24*60*60*1000l });

@ -382,18 +369,17 @@ public class PeerProfile {
_dbHistory = new DBHistory(_context, group);

_sendSuccessSize.setStatLog(_context.statManager().getStatLog());
_sendFailureSize.setStatLog(_context.statManager().getStatLog());
_receiveSize.setStatLog(_context.statManager().getStatLog());
_dbResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelCreateResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelTestResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelTestResponseTimeSlow.setStatLog(_context.statManager().getStatLog());
_commError.setStatLog(_context.statManager().getStatLog());
_dbIntroduction.setStatLog(_context.statManager().getStatLog());
_expanded = true;
}
/** once a day, on average, cut the measured throughput values in half */
private static final long DROP_PERIOD_MINUTES = 24*60;
/** let's try once an hour times 3/4 */
private static final int DROP_PERIOD_MINUTES = 60;
private static final double DEGRADE_FACTOR = 0.75;
private long _lastCoalesceDate = System.currentTimeMillis();
private void coalesceThroughput() {
long now = System.currentTimeMillis();
@ -410,46 +396,19 @@ public class PeerProfile {
break;
}
}

if (false && _log.shouldLog(Log.WARN) ) {
StringBuffer buf = new StringBuffer(128);
buf.append("Updating throughput after ").append(tot).append(" to ");
for (int i = 0; i < THROUGHPUT_COUNT; i++)
buf.append(_peakThroughput[i]).append(',');
buf.append(" for ").append(_peer.toBase64());
_log.warn(buf.toString());
}
} else {
if (_context.random().nextLong(DROP_PERIOD_MINUTES*2) <= 0) {
for (int i = 0; i < THROUGHPUT_COUNT; i++)
_peakThroughput[i] /= 2;

if (false && _log.shouldLog(Log.WARN) ) {
StringBuffer buf = new StringBuffer(128);
buf.append("Degrading the throughput measurements to ");
for (int i = 0; i < THROUGHPUT_COUNT; i++)
buf.append(_peakThroughput[i]).append(',');
buf.append(" for ").append(_peer.toBase64());
_log.warn(buf.toString());
}
if (_context.random().nextInt(DROP_PERIOD_MINUTES*2) <= 0) {
for (int i = 0; i < THROUGHPUT_COUNT; i++)
_peakThroughput[i] *= DEGRADE_FACTOR;
}
}

// we degrade the tunnel throughput here too, regardless of the current
// activity
if (_context.random().nextLong(DROP_PERIOD_MINUTES*2) <= 0) {
if (_context.random().nextInt(DROP_PERIOD_MINUTES*2) <= 0) {
for (int i = 0; i < THROUGHPUT_COUNT; i++) {
_peakTunnelThroughput[i] /= 2;
_peakTunnel1mThroughput[i] /= 2;
}

if (_log.shouldLog(Log.WARN) ) {
StringBuffer buf = new StringBuffer(128);
buf.append("Degrading the tunnel throughput measurements to ");
for (int i = 0; i < THROUGHPUT_COUNT; i++)
buf.append(_peakTunnel1mThroughput[i]).append(',');
buf.append(" for ").append(_peer.toBase64());
_log.warn(buf.toString());
_peakTunnelThroughput[i] *= DEGRADE_FACTOR;
_peakTunnel1mThroughput[i] *= DEGRADE_FACTOR;
}
}
_peakThroughputCurrentTotal = 0;
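The replacement decay is probabilistic: coalesceThroughput() runs about once a minute, and nextInt(DROP_PERIOD_MINUTES*2) <= 0 succeeds with probability 1/120 per call, so with DROP_PERIOD_MINUTES = 60 each peak is multiplied by DEGRADE_FACTOR (0.75) roughly once every two hours on average, versus the old halving once a day. (The in-code comment says "once an hour"; the arithmetic works out to once per two hours, since nextInt(120) is zero 1/120 of the time.) A self-contained sketch of the idiom, with java.util.Random standing in for the router's random source:

// Sketch: probabilistic exponential decay of recorded peaks.
// Called ~once a minute; each call decays with probability 1/(2*DROP_PERIOD_MINUTES).
import java.util.Random;

class PeakDecay {
    static final int DROP_PERIOD_MINUTES = 60;
    static final double DEGRADE_FACTOR = 0.75;
    private final Random rnd = new Random();

    void maybeDecay(double[] peaks) {
        if (rnd.nextInt(DROP_PERIOD_MINUTES * 2) == 0) {  // ~1/120 per call
            for (int i = 0; i < peaks.length; i++)
                peaks[i] *= DEGRADE_FACTOR;
        }
    }
}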
@ -460,37 +419,30 @@ public class PeerProfile {
/** update the stats and rates (this should be called once a minute) */
public void coalesceStats() {
if (!_expanded) return;
_commError.coalesceStats();
_dbIntroduction.coalesceStats();
_dbResponseTime.coalesceStats();
_receiveSize.coalesceStats();
_sendFailureSize.coalesceStats();
_sendSuccessSize.coalesceStats();
_tunnelCreateResponseTime.coalesceStats();
_tunnelTestResponseTime.coalesceStats();
_tunnelTestResponseTimeSlow.coalesceStats();
_dbHistory.coalesceStats();
_tunnelHistory.coalesceStats();

coalesceThroughput();

_speedValue = calculateSpeed();
_oldSpeedValue = calculateOldSpeed();
_reliabilityValue = calculateReliability();
_capacityValue = calculateCapacity();
_integrationValue = calculateIntegration();
_isFailing = calculateIsFailing();

if (_log.shouldLog(Log.DEBUG))
_log.debug("Coalesced: speed [" + _speedValue + "] reliability [" + _reliabilityValue + "] capacity [" + _capacityValue + "] integration [" + _integrationValue + "] failing? [" + _isFailing + "]");
_log.debug("Coalesced: speed [" + _speedValue + "] capacity [" + _capacityValue + "] integration [" + _integrationValue + "] failing? [" + _isFailing + "]");
}

private double calculateSpeed() { return _context.speedCalculator().calc(this); }
private double calculateOldSpeed() { return _context.oldSpeedCalculator().calc(this); }
private double calculateReliability() { return _context.reliabilityCalculator().calc(this); }
private double calculateCapacity() { return _context.capacityCalculator().calc(this); }
private double calculateIntegration() { return _context.integrationCalculator().calc(this); }
private boolean calculateIsFailing() { return _context.isFailingCalculator().calcBoolean(this); }
private boolean calculateIsFailing() { return false; }
void setIsFailing(boolean val) { _isFailing = val; }

public int hashCode() { return (_peer == null ? 0 : _peer.hashCode()); }
@ -504,12 +456,36 @@ public class PeerProfile {
public String toString() { return "Profile: " + getPeer().toBase64(); }

/**
* New measurement is 12KB per expanded profile. (2009-03 zzz)
* And nowhere in the code is shrinkProfile() called so
* the size of compact profiles doesn't matter right now.
* This is far bigger than the NetDB entry, which is only about 1.5KB
* now that most of the stats have been removed.
*
* The biggest user in the profile is the Rates. (144 bytes each according to jhat.)
* PeerProfile:   9 RateStats, 3-5 Rates each - 35 total
* DBHistory:     2 RateStats, 3 each         -  6 total
* TunnelHistory: 4 RateStats, 5 each         - 20 total
*                ---                           ---------
*                15                            61 total
*                *60 bytes                     *144 bytes
*                ---                           ---------
*                900 bytes                     8784 bytes
*
* The RateStat itself is 32 bytes and the Rate[] is 28 so that adds
* about 1KB.
*
* So two obvious things to do are cut out some of the Rates,
* and call shrinkProfile().
*
* Obsolete calculation follows:
*
* Calculate the memory consumption of profiles. Measured to be ~3739 bytes
* for an expanded profile, and ~212 bytes for a compacted one.
*
*/
public static void main2(String args[]) {
RouterContext ctx = new RouterContext(null);
public static void main(String args[]) {
RouterContext ctx = new RouterContext(new net.i2p.router.Router());
testProfileSize(ctx, 100, 0); // 560KB
testProfileSize(ctx, 1000, 0); // 3.9MB
testProfileSize(ctx, 10000, 0); // 37MB
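A quick check of the arithmetic in the table above: 9 + 2 + 4 = 15 RateStats holding 35 + 6 + 20 = 61 Rate objects per expanded profile; at the jhat-measured 144 bytes each that is 61 x 144 = 8784 bytes (~8.6 KB) for the Rates alone, plus 15 x (32 + 28) = 900 bytes of RateStat and Rate[] overhead. The Rates therefore dominate the ~12 KB expanded-profile footprint quoted at the top of the comment, which is why the two suggested fixes are trimming Rates and actually calling shrinkProfile().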
@ -524,7 +500,7 @@ public class PeerProfile {
* PeerProfile [filename]*
* </pre>
*/
public static void main(String args[]) {
public static void main2(String args[]) {
RouterContext ctx = new RouterContext(new net.i2p.router.Router());
DecimalFormat fmt = new DecimalFormat("0,000.0");
fmt.setPositivePrefix("+");
@ -540,7 +516,6 @@ public class PeerProfile {
//profile.coalesceStats();
buf.append("Peer " + profile.getPeer().toBase64()
+ ":\t Speed:\t" + fmt.format(profile.calculateSpeed())
+ " Reliability:\t" + fmt.format(profile.calculateReliability())
+ " Capacity:\t" + fmt.format(profile.calculateCapacity())
+ " Integration:\t" + fmt.format(profile.calculateIntegration())
+ " Active?\t" + profile.getIsActive()

@ -50,7 +50,6 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastSendFailed(_context.clock().now());
data.getSendFailureSize().addData(0, 0); // yeah, should be a frequency...
}

/**
@ -61,7 +60,6 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastSendFailed(_context.clock().now());
data.getSendFailureSize().addData(0, 0); // yeah, should be a frequency...
}

/**
@ -74,8 +72,6 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastSendFailed(_context.clock().now());
data.getSendFailureSize().addData(1, 0); // yeah, should be a frequency...
data.getCommError().addData(1, 0); // see above
}

/**
@ -125,8 +121,6 @@ public class ProfileManagerImpl implements ProfileManager {
if (data == null) return;
data.updateTunnelTestTimeAverage(responseTimeMs);
data.getTunnelTestResponseTime().addData(responseTimeMs, responseTimeMs);
if (responseTimeMs > getSlowThreshold())
data.getTunnelTestResponseTimeSlow().addData(responseTimeMs, responseTimeMs);
}
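The hunk above keeps the tunnel-test bookkeeping while dropping the removed sendFailureSize stat; test times above getSlowThreshold() are additionally recorded into the separate "slow" RateStat so rare outliers stay visible. A minimal sketch of that pattern in isolation (the 5s constant matches the getTunnelTestResponseTimeSlow() javadoc; recordTest is a hypothetical name):

// Sketch: record every sample, and double-record outliers into a "slow" series.
private static final long SLOW_THRESHOLD_MS = 5000;

void recordTest(PeerProfile data, long responseTimeMs) {
    data.getTunnelTestResponseTime().addData(responseTimeMs, responseTimeMs);
    if (responseTimeMs > SLOW_THRESHOLD_MS)
        data.getTunnelTestResponseTimeSlow().addData(responseTimeMs, responseTimeMs);
}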
public void tunnelDataPushed(Hash peer, long rtt, int size) {

@ -1028,7 +1028,7 @@ public class ProfileOrganizer {

/**
* called after locking the reorganizeLock, place the profile in the appropriate tier.
* This is where we implement the (betterThanAverage ? goToPierX : goToPierY) algorithms
* This is where we implement the (betterThanAverage ? goToTierX : goToTierY) algorithms
*
*/
private void locked_placeProfile(PeerProfile profile) {
@ -1153,7 +1153,6 @@ public class ProfileOrganizer {
organizer.isHighCapacity(peer) ? "IR " :
organizer.isFailing(peer) ? "IX " : "I ") + "]: "
+ "\t Speed:\t" + fmt.format(profile.getSpeedValue())
+ " Reliability:\t" + fmt.format(profile.getReliabilityValue())
+ " Capacity:\t" + fmt.format(profile.getCapacityValue())
+ " Integration:\t" + fmt.format(profile.getIntegrationValue())
+ " Active?\t" + profile.getIsActive()
@ -1164,7 +1163,6 @@ public class ProfileOrganizer {
organizer.isHighCapacity(peer) ? "R " :
organizer.isFailing(peer) ? "X " : " ") + "]: "
+ "\t Speed:\t" + fmt.format(profile.getSpeedValue())
+ " Reliability:\t" + fmt.format(profile.getReliabilityValue())
+ " Capacity:\t" + fmt.format(profile.getCapacityValue())
+ " Integration:\t" + fmt.format(profile.getIntegrationValue())
+ " Active?\t" + profile.getIsActive()

@ -95,7 +95,6 @@ class ProfilePersistenceHelper {
if (_us != null)
buf.append("# as calculated by ").append(_us.toBase64()).append(NL);
buf.append("#").append(NL);
buf.append("# reliability: ").append(profile.getReliabilityValue()).append(NL);
buf.append("# capacity: ").append(profile.getCapacityValue()).append(NL);
buf.append("# integration: ").append(profile.getIntegrationValue()).append(NL);
buf.append("# speedValue: ").append(profile.getSpeedValue()).append(NL);
@ -134,15 +133,12 @@ class ProfilePersistenceHelper {

if (profile.getIsExpanded()) {
// only write out expanded data if, uh, we've got it
profile.getCommError().store(out, "commError");
profile.getDbIntroduction().store(out, "dbIntroduction");
profile.getDbResponseTime().store(out, "dbResponseTime");
profile.getReceiveSize().store(out, "receiveSize");
profile.getSendFailureSize().store(out, "sendFailureSize");
profile.getSendSuccessSize().store(out, "sendSuccessSize");
profile.getTunnelCreateResponseTime().store(out, "tunnelCreateResponseTime");
profile.getTunnelTestResponseTime().store(out, "tunnelTestResponseTime");
profile.getTunnelTestResponseTimeSlow().store(out, "tunnelTestResponseTimeSlow");
}
}

@ -217,15 +213,12 @@ class ProfilePersistenceHelper {
profile.getTunnelHistory().load(props);
profile.getDBHistory().load(props);

profile.getCommError().load(props, "commError", true);
profile.getDbIntroduction().load(props, "dbIntroduction", true);
profile.getDbResponseTime().load(props, "dbResponseTime", true);
profile.getReceiveSize().load(props, "receiveSize", true);
profile.getSendFailureSize().load(props, "sendFailureSize", true);
profile.getSendSuccessSize().load(props, "sendSuccessSize", true);
profile.getTunnelCreateResponseTime().load(props, "tunnelCreateResponseTime", true);
profile.getTunnelTestResponseTime().load(props, "tunnelTestResponseTime", true);
profile.getTunnelTestResponseTimeSlow().load(props, "tunnelTestResponseTimeSlow", true);

if (_log.shouldLog(Log.DEBUG))
_log.debug("Loaded the profile for " + peer.toBase64() + " from " + file.getName());
@ -1,91 +0,0 @@
package net.i2p.router.peermanager;

import net.i2p.router.RouterContext;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;

/**
* Determine how reliable the peer is - how likely they'll be able to respond or
* otherwise carry out whatever we ask them to (or even merely be reachable)
*
*/
public class ReliabilityCalculator extends Calculator {
private Log _log;
private RouterContext _context;

public ReliabilityCalculator(RouterContext context) {
_context = context;
_log = context.logManager().getLog(ReliabilityCalculator.class);
}

public double calc(PeerProfile profile) {
// if we've never succeeded (even if we've never tried), the reliability is zip
if (profile.getSendSuccessSize().getRate(60*60*1000).getLifetimeEventCount() < 0)
return profile.getReliabilityBonus();

long val = 0;
val += profile.getSendSuccessSize().getRate(60*1000).getCurrentEventCount() * 20;
val += profile.getSendSuccessSize().getRate(60*1000).getLastEventCount() * 10;
val += profile.getSendSuccessSize().getRate(60*60*1000).getLastEventCount() * 1;
val += profile.getSendSuccessSize().getRate(60*60*1000).getCurrentEventCount() * 5;

val += profile.getTunnelCreateResponseTime().getRate(10*60*1000).getLastEventCount() * 5;
val += profile.getTunnelCreateResponseTime().getRate(60*60*1000).getCurrentEventCount();
val += profile.getTunnelCreateResponseTime().getRate(60*60*1000).getLastEventCount();

//val -= profile.getSendFailureSize().getRate(60*1000).getLastEventCount() * 5;
//val -= profile.getSendFailureSize().getRate(60*60*1000).getCurrentEventCount()*2;
//val -= profile.getSendFailureSize().getRate(60*60*1000).getLastEventCount()*2;

RateStat rejRate = profile.getTunnelHistory().getRejectionRate();
if (rejRate.getRate(60*1000).getCurrentEventCount() > 0)
val -= 200;
if (rejRate.getRate(60*1000).getLastEventCount() > 0)
val -= 100;
if (rejRate.getRate(10*60*1000).getCurrentEventCount() > 0)
val -= 10;
if (rejRate.getRate(10*60*1000).getCurrentEventCount() > 0)
val -= 5;

// penalize them heavily for dropping netDb requests (though these could have
// failed due to tunnel timeouts, so don't be too mean)
if (profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getCurrentEventCount() > 0)
val -= 10;
if (profile.getDBHistory().getFailedLookupRate().getRate(60*1000).getLastEventCount() > 0)
val -= 5;

// scream and shout on network errors
if (profile.getCommError().getRate(60*1000).getCurrentEventCount() > 0)
val -= 200;
if (profile.getCommError().getRate(60*1000).getLastEventCount() > 0)
val -= 200;

if (profile.getCommError().getRate(60*60*1000).getCurrentEventCount() > 0)
val -= 10;
if (profile.getCommError().getRate(60*60*1000).getLastEventCount() > 0)
val -= 10;

val -= profile.getCommError().getRate(24*60*60*1000).getCurrentEventCount() * 1;

//long now = _context.clock().now();

long timeSinceRejection = 61*60*1000; // now - profile.getTunnelHistory().getLastRejected();
if (timeSinceRejection > 60*60*1000) {
// noop. rejection was over 60 minutes ago
} else if (timeSinceRejection > 10*60*1000) {
val -= 10; // 10-60 minutes ago we got a rejection
} else if (timeSinceRejection > 60*1000) {
val -= 50; // 1-10 minutes ago we got a rejection
} else {
val -= 100; // we got a rejection within the last minute
}

//if ( (profile.getLastSendSuccessful() > 0) && (now - 24*60*60*1000 > profile.getLastSendSuccessful()) ) {
//    // we know they're real, but we haven't sent them a message successfully in over a day.
//    val -= 1000;
//}

val += profile.getReliabilityBonus();
return val;
}
}
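Reading the deleted scoring as a worked example: a peer with 3 sends in the current minute (+60), 2 tunnel-create responses in the last 10-minute bucket (+10), one tunnel rejection in the current minute (-200) and a comm error in the last hour (-10) nets 60 + 10 - 200 - 10 = -140 before the reliability bonus; recent rejections and comm errors dominate everything the peer did right. Note also that timeSinceRejection is hard-coded to 61*60*1000, so the tiered rejection penalty at the bottom was already dead code when the class was removed.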
@ -1,90 +0,0 @@
package net.i2p.router.peermanager;

import net.i2p.router.RouterContext;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;

/**
* Simple speed calculator that just counts how many messages go through the
* tunnel.
*
*/
public class StrictSpeedCalculator extends Calculator {
private Log _log;
private RouterContext _context;

public StrictSpeedCalculator(RouterContext context) {
_context = context;
_log = context.logManager().getLog(StrictSpeedCalculator.class);
}

public double calc(PeerProfile profile) {
return countSuccesses(profile);
}
private double countSuccesses(PeerProfile profile) {
RateStat success = profile.getTunnelHistory().getProcessSuccessRate();
RateStat failure = profile.getTunnelHistory().getProcessFailureRate();
return messagesPerMinute(success, failure);
}
private double messagesPerMinute(RateStat success, RateStat failure) {
double rv = 0.0d;
if (success != null) {
Rate rate = null;
long periods[] = success.getPeriods();
for (int i = 0; i < periods.length; i++) {
rate = success.getRate(periods[i]);
if ( (rate != null) && (rate.getCurrentTotalValue() > 0) )
break;
}

double value = rate.getCurrentTotalValue();
value += rate.getLastTotalValue();
rv = value * 10.0d * 60.0d * 1000.0d / (double)rate.getPeriod();

// if any of the messages are getting fragmented and cannot be
// handled, penalize like crazy
Rate fail = failure.getRate(rate.getPeriod());
if (fail.getCurrentTotalValue() > 0)
rv /= fail.getCurrentTotalValue();
}
return rv;
}

/*
public double calc(PeerProfile profile) {
double successCount = countSuccesses(profile);
double failureCount = countFailures(profile);

double rv = successCount - 5*failureCount;
if (rv < 0)
rv = 0;
return rv;
}
private double countSuccesses(PeerProfile profile) {
RateStat success = profile.getTunnelHistory().getProcessSuccessRate();
return messagesPerMinute(success);
}
private double countFailures(PeerProfile profile) {
RateStat failure = profile.getTunnelHistory().getProcessFailureRate();
return messagesPerMinute(failure);
}
private double messagesPerMinute(RateStat stat) {
double rv = 0.0d;
if (stat != null) {
Rate rate = null;
long periods[] = stat.getPeriods();
for (int i = 0; i < periods.length; i++) {
rate = stat.getRate(periods[i]);
if ( (rate != null) && (rate.getCurrentTotalValue() > 0) )
break;
}

double value = rate.getCurrentTotalValue();
value += rate.getLastTotalValue();
rv = value * 60.0d * 1000.0d / (double)rate.getPeriod();
}
return rv;
}
*/
}
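The per-minute normalization in the deleted messagesPerMinute() is value * 60000 / period, using the shortest period that has any data (the live variant above also multiplies by 10). As a worked example, 600 messages accumulated over a 5-minute (300000 ms) period gives 600 * 60000 / 300000 = 120 messages per minute. A failure total on the same period divides the score, so even a single unhandled (e.g. fragmented) message cuts the result sharply.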
@ -26,8 +26,6 @@ public class TunnelHistory {
private volatile long _lastFailed;
private RateStat _rejectRate;
private RateStat _failRate;
private RateStat _processSuccessRate;
private RateStat _processFailureRate;
private String _statGroup;

/** probabilistic tunnel rejection due to a flood of requests */
@ -47,14 +45,10 @@ public class TunnelHistory {
}

private void createRates(String statGroup) {
_rejectRate = new RateStat("tunnelHistory.rejectRate", "How often does this peer reject a tunnel request?", statGroup, new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_failRate = new RateStat("tunnelHistory.failRate", "How often do tunnels this peer accepts fail?", statGroup, new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_processSuccessRate = new RateStat("tunnelHistory.processSuccessRate", "How many messages does a tunnel process?", statGroup, new long[] { 5*60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_processFailureRate = new RateStat("tunnelHistory.processfailureRate", "How many messages does a tunnel fail?", statGroup, new long[] { 5*60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_rejectRate = new RateStat("tunnelHistory.rejectRate", "How often does this peer reject a tunnel request?", statGroup, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_failRate = new RateStat("tunnelHistory.failRate", "How often do tunnels this peer accepts fail?", statGroup, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
_rejectRate.setStatLog(_context.statManager().getStatLog());
_failRate.setStatLog(_context.statManager().getStatLog());
_processSuccessRate.setStatLog(_context.statManager().getStatLog());
_processFailureRate.setStatLog(_context.statManager().getStatLog());
}

/** total tunnels the peer has agreed to participate in */
@ -77,10 +71,7 @@ public class TunnelHistory {
public long getLastFailed() { return _lastFailed; }

public void incrementProcessed(int processedSuccessfully, int failedProcessing) {
if (processedSuccessfully > 0)
_processSuccessRate.addData(processedSuccessfully, 0);
if (failedProcessing > 0)
_processFailureRate.addData(failedProcessing, 0);
// old strict speed calculator
}

public void incrementAgreedTo() {
@ -129,16 +120,12 @@ public class TunnelHistory {

public RateStat getRejectionRate() { return _rejectRate; }
public RateStat getFailedRate() { return _failRate; }
public RateStat getProcessSuccessRate() { return _processSuccessRate; }
public RateStat getProcessFailureRate() { return _processFailureRate; }

public void coalesceStats() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Coalescing stats");
_rejectRate.coalesceStats();
_failRate.coalesceStats();
_processFailureRate.coalesceStats();
_processSuccessRate.coalesceStats();
}

private final static String NL = System.getProperty("line.separator");
@ -161,8 +148,6 @@ public class TunnelHistory {
out.write(buf.toString().getBytes());
_rejectRate.store(out, "tunnelHistory.rejectRate");
_failRate.store(out, "tunnelHistory.failRate");
_processSuccessRate.store(out, "tunnelHistory.processSuccessRate");
_processFailureRate.store(out, "tunnelHistory.processFailureRate");
}

private void add(StringBuffer buf, String name, long val, String description) {
@ -187,12 +172,6 @@ public class TunnelHistory {
_failRate.load(props, "tunnelHistory.failRate", true);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Loading tunnelHistory.failRate");
_processFailureRate.load(props, "tunnelHistory.processFailureRate", true);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Loading tunnelHistory.processFailureRate");
_processSuccessRate.load(props, "tunnelHistory.processSuccessRate", true);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Loading tunnelHistory.processSuccessRate");
} catch (IllegalArgumentException iae) {
_log.warn("TunnelHistory rates are corrupt, resetting", iae);
createRates(_statGroup);

@ -133,6 +133,10 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
return _manager.wasUnreachable(dest);
}

public byte[] getIP(Hash dest) {
return _manager.getIP(dest);
}

public List getMostRecentErrorMessages() {
return _manager.getMostRecentErrorMessages();
}

@ -33,11 +33,11 @@ public class FIFOBandwidthRefiller implements Runnable {
public static final String PROP_OUTBOUND_BANDWIDTH_PEAK = "i2np.bandwidth.outboundBurstKBytes";
//public static final String PROP_REPLENISH_FREQUENCY = "i2np.bandwidth.replenishFrequencyMs";

// no longer allow unlimited bandwidth - the user must specify a value, and if they do not, it is 32/16KBps
public static final int DEFAULT_INBOUND_BANDWIDTH = 48;
public static final int DEFAULT_OUTBOUND_BANDWIDTH = 24;
public static final int DEFAULT_INBOUND_BURST_BANDWIDTH = 64;
public static final int DEFAULT_OUTBOUND_BURST_BANDWIDTH = 32;
// no longer allow unlimited bandwidth - the user must specify a value, else use defaults below (KBps)
public static final int DEFAULT_INBOUND_BANDWIDTH = 64;
public static final int DEFAULT_OUTBOUND_BANDWIDTH = 32;
public static final int DEFAULT_INBOUND_BURST_BANDWIDTH = 80;
public static final int DEFAULT_OUTBOUND_BURST_BANDWIDTH = 40;

public static final int DEFAULT_BURST_SECONDS = 60;
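With the new defaults the refiller sustains 64 KBps in / 32 KBps out, bursting to 80/40 KBps; over DEFAULT_BURST_SECONDS = 60 that gives roughly (80 - 64) * 60 = 960 KB of extra inbound headroom above the steady rate. A minimal token-bucket sketch of that relationship (an illustration of the defaults only, not the FIFOBandwidthRefiller implementation):

// Sketch: a token bucket sized for burst-above-steady-rate, per the defaults above.
class BurstBucket {
    static final int RATE_KBPS = 64;      // DEFAULT_INBOUND_BANDWIDTH
    static final int BURST_KBPS = 80;     // DEFAULT_INBOUND_BURST_BANDWIDTH
    static final int BURST_SECONDS = 60;  // DEFAULT_BURST_SECONDS
    // capacity beyond the steady rate: (80 - 64) KBps * 60 s = 960 KB
    static final int CAPACITY_KB = (BURST_KBPS - RATE_KBPS) * BURST_SECONDS;

    private double tokensKB = CAPACITY_KB;

    /** called once per second: refill at the steady rate, capped at capacity */
    void refill() { tokensKB = Math.min(CAPACITY_KB, tokensKB + RATE_KBPS); }

    /** try to spend sizeKB of burst headroom; true if allowed */
    boolean consume(double sizeKB) {
        if (sizeKB > tokensKB) return false;
        tokensKB -= sizeKB;
        return true;
    }
}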
@ -1,9 +1,9 @@
package net.i2p.router.transport;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -14,12 +14,12 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.ConcurrentHashMap;

import net.i2p.data.Hash;
import net.i2p.data.RouterAddress;
@ -34,6 +34,7 @@ import net.i2p.router.OutNetMessage;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;

/**
@ -44,11 +45,13 @@ public abstract class TransportImpl implements Transport {
private Log _log;
private TransportEventListener _listener;
private RouterAddress _currentAddress;
private List _sendPool;
private final List _sendPool;
protected RouterContext _context;
/** map from routerIdentHash to timestamp (Long) that the peer was last unreachable */
private Map _unreachableEntries;
private Set _wasUnreachableEntries;
private final Map<Hash, Long> _unreachableEntries;
private Set<Hash> _wasUnreachableEntries;
/** global router ident -> IP */
private static Map<Hash, byte[]> _IPMap = new ConcurrentHashMap(128);

/**
* Initialize the new transport
@ -57,7 +60,7 @@ public abstract class TransportImpl implements Transport {
public TransportImpl(RouterContext context) {
_context = context;
_log = _context.logManager().getLog(TransportImpl.class);

_context.statManager().createRateStat("transport.sendMessageFailureLifetime", "How long the lifetime of messages that fail are?", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.sendMessageSize", "How large are the messages sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("transport.receiveMessageSize", "How large are the messages received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
@ -67,10 +70,10 @@ public abstract class TransportImpl implements Transport {
_context.statManager().createRateStat("transport.expiredOnQueueLifetime", "How long a message that expires on our outbound queue is processed", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l } );
_sendPool = new ArrayList(16);
_unreachableEntries = new HashMap(16);
_wasUnreachableEntries = new HashSet(16);
_wasUnreachableEntries = new ConcurrentHashSet(16);
_currentAddress = null;
}

/**
* How many peers can we talk to right now?
*
@ -107,18 +110,18 @@ public abstract class TransportImpl implements Transport {
* Can we initiate or accept a connection to another peer, saving some margin
*/
public boolean haveCapacity() { return true; }

/**
* Return our peer clock skews on a transport.
* Vector composed of Long, each element representing a peer skew in seconds.
* Dummy version. Transports override it.
*/
public Vector getClockSkews() { return new Vector(); }

public List getMostRecentErrorMessages() { return Collections.EMPTY_LIST; }
/**
* Nonblocking call to pull the next outbound message
* off the queue.
* off the queue.
*
* @return the next message or null if none are available
*/
@ -131,7 +134,7 @@ public abstract class TransportImpl implements Transport {
msg.beginSend();
return msg;
}
/**
* The transport is done sending this message
*
@ -163,7 +166,7 @@ public abstract class TransportImpl implements Transport {
}
/**
* The transport is done sending this message. This is the method that actually
* does all of the cleanup - firing off jobs, requeueing, updating stats, etc.
* does all of the cleanup - firing off jobs, requeueing, updating stats, etc.
*
* @param msg message in question
* @param sendSuccessful true if the peer received it
@ -176,64 +179,64 @@ public abstract class TransportImpl implements Transport {
msg.timestamp("afterSend(successful)");
else
msg.timestamp("afterSend(failed)");

if (!sendSuccessful)
msg.transportFailed(getStyle());

if (msToSend > 1000) {
if (_log.shouldLog(Log.WARN))
_log.warn("afterSend slow: [success=" + sendSuccessful + "] " + msg.getMessageSize() + "byte "
+ msg.getMessageType() + " " + msg.getMessageId() + " to "
+ msg.getTarget().getIdentity().calculateHash().toBase64().substring(0,6) + " took " + msToSend
_log.warn("afterSend slow: [success=" + sendSuccessful + "] " + msg.getMessageSize() + "byte "
+ msg.getMessageType() + " " + msg.getMessageId() + " to "
+ msg.getTarget().getIdentity().calculateHash().toBase64().substring(0,6) + " took " + msToSend
+ "/" + msg.getTransmissionTime());
}
//if (true)
//if (true)
// _log.error("(not error) I2NP message sent? " + sendSuccessful + " " + msg.getMessageId() + " after " + msToSend + "/" + msg.getTransmissionTime());

long lifetime = msg.getLifetime();
if (lifetime > 3000) {
int level = Log.WARN;
if (!sendSuccessful)
level = Log.INFO;
if (_log.shouldLog(level))
_log.log(level, "afterSend slow (" + lifetime + "/" + msToSend + "/" + msg.getTransmissionTime() + "): [success=" + sendSuccessful + "]" + msg.getMessageSize() + "byte "
+ msg.getMessageType() + " " + msg.getMessageId() + " from " + _context.routerHash().toBase64().substring(0,6)
_log.log(level, "afterSend slow (" + lifetime + "/" + msToSend + "/" + msg.getTransmissionTime() + "): [success=" + sendSuccessful + "]" + msg.getMessageSize() + "byte "
+ msg.getMessageType() + " " + msg.getMessageId() + " from " + _context.routerHash().toBase64().substring(0,6)
+ " to " + msg.getTarget().getIdentity().calculateHash().toBase64().substring(0,6) + ": " + msg.toString());
} else {
if (_log.shouldLog(Log.INFO))
_log.info("afterSend: [success=" + sendSuccessful + "]" + msg.getMessageSize() + "byte "
+ msg.getMessageType() + " " + msg.getMessageId() + " from " + _context.routerHash().toBase64().substring(0,6)
_log.info("afterSend: [success=" + sendSuccessful + "]" + msg.getMessageSize() + "byte "
+ msg.getMessageType() + " " + msg.getMessageId() + " from " + _context.routerHash().toBase64().substring(0,6)
+ " to " + msg.getTarget().getIdentity().calculateHash().toBase64().substring(0,6) + "\n" + msg.toString());
}

if (sendSuccessful) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send message " + msg.getMessageType() + " to "
+ msg.getTarget().getIdentity().getHash().toBase64() + " with transport "
_log.debug("Send message " + msg.getMessageType() + " to "
+ msg.getTarget().getIdentity().getHash().toBase64() + " with transport "
+ getStyle() + " successfully");
Job j = msg.getOnSendJob();
if (j != null)
if (j != null)
_context.jobQueue().addJob(j);
log = true;
msg.discardData();
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Failed to send message " + msg.getMessageType()
+ " to " + msg.getTarget().getIdentity().getHash().toBase64()
_log.info("Failed to send message " + msg.getMessageType()
+ " to " + msg.getTarget().getIdentity().getHash().toBase64()
+ " with transport " + getStyle() + " (details: " + msg + ")");
if (msg.getExpiration() < _context.clock().now())
_context.statManager().addRateData("transport.expiredOnQueueLifetime", lifetime, lifetime);

if (allowRequeue) {
if ( ( (msg.getExpiration() <= 0) || (msg.getExpiration() > _context.clock().now()) )
if ( ( (msg.getExpiration() <= 0) || (msg.getExpiration() > _context.clock().now()) )
&& (msg.getMessage() != null) ) {
// this may not be the last transport available - keep going
_context.outNetMessagePool().add(msg);
// don't discard the data yet!
} else {
if (_log.shouldLog(Log.INFO))
_log.info("No more time left (" + new Date(msg.getExpiration())
+ ", expiring without sending successfully the "
_log.info("No more time left (" + new Date(msg.getExpiration())
+ ", expiring without sending successfully the "
+ msg.getMessageType());
if (msg.getOnFailedSendJob() != null)
_context.jobQueue().addJob(msg.getOnFailedSendJob());
@ -247,8 +250,8 @@ public abstract class TransportImpl implements Transport {
} else {
MessageSelector selector = msg.getReplySelector();
if (_log.shouldLog(Log.INFO))
_log.info("Failed and no requeue allowed for a "
+ msg.getMessageSize() + " byte "
_log.info("Failed and no requeue allowed for a "
+ msg.getMessageSize() + " byte "
+ msg.getMessageType() + " message with selector " + selector, new Exception("fail cause"));
if (msg.getOnFailedSendJob() != null)
_context.jobQueue().addJob(msg.getOnFailedSendJob());
@ -265,9 +268,9 @@ public abstract class TransportImpl implements Transport {
String type = msg.getMessageType();
// the udp transport logs some further details
/*
_context.messageHistory().sendMessage(type, msg.getMessageId(),
_context.messageHistory().sendMessage(type, msg.getMessageId(),
msg.getExpiration(),
msg.getTarget().getIdentity().getHash(),
msg.getTarget().getIdentity().getHash(),
sendSuccessful);
*/
}
@ -277,23 +280,23 @@ public abstract class TransportImpl implements Transport {
long allTime = now - msg.getCreated();
if (allTime > 5*1000) {
if (_log.shouldLog(Log.INFO))
_log.info("Took too long from preparation to afterSend(ok? " + sendSuccessful
+ "): " + allTime + "ms/" + sendTime + "ms after failing on: "
_log.info("Took too long from preparation to afterSend(ok? " + sendSuccessful
+ "): " + allTime + "ms/" + sendTime + "ms after failing on: "
+ msg.getFailedTransports() + " and succeeding on " + getStyle());
if ( (allTime > 60*1000) && (sendSuccessful) ) {
// WTF!!@#
if (_log.shouldLog(Log.WARN))
_log.warn("WTF, more than a minute slow? " + msg.getMessageType()
+ " of id " + msg.getMessageId() + " (send begin on "
+ new Date(msg.getSendBegin()) + " / created on "
_log.warn("WTF, more than a minute slow? " + msg.getMessageType()
+ " of id " + msg.getMessageId() + " (send begin on "
+ new Date(msg.getSendBegin()) + " / created on "
+ new Date(msg.getCreated()) + "): " + msg, msg.getCreatedBy());
_context.messageHistory().messageProcessingError(msg.getMessageId(),
msg.getMessageType(),
_context.messageHistory().messageProcessingError(msg.getMessageId(),
msg.getMessageType(),
"Took too long to send [" + allTime + "ms]");
}
}

if (sendSuccessful) {
_context.statManager().addRateData("transport.sendProcessingTime", lifetime, lifetime);
_context.profileManager().messageSent(msg.getTarget().getIdentity().getHash(), getStyle(), sendTime, msg.getMessageSize());
@ -303,7 +306,7 @@ public abstract class TransportImpl implements Transport {
_context.statManager().addRateData("transport.sendMessageFailureLifetime", lifetime, lifetime);
}
}
/**
* Asynchronously send the message as requested in the message and, if the
* send is successful, queue up any msg.getOnSendJob job, and register it
@ -319,14 +322,14 @@ public abstract class TransportImpl implements Transport {
}
boolean duplicate = false;
synchronized (_sendPool) {
if (_sendPool.contains(msg))
if (_sendPool.contains(msg))
duplicate = true;
else
_sendPool.add(msg);
}
if (duplicate) {
if (_log.shouldLog(Log.ERROR))
_log.error("Message already is in the queue? wtf. msg = " + msg,
_log.error("Message already is in the queue? wtf. msg = " + msg,
new Exception("wtf, requeued?"));
}

@ -342,15 +345,15 @@ public abstract class TransportImpl implements Transport {
* and it should not block
*/
protected abstract void outboundMessageReady();

/**
* Message received from the I2NPMessageReader - send it to the listener
*
*/
public void messageReceived(I2NPMessage inMsg, RouterIdentity remoteIdent, Hash remoteIdentHash, long msToReceive, int bytesReceived) {
//if (true)
//if (true)
// _log.error("(not error) I2NP message received: " + inMsg.getUniqueId() + " after " + msToReceive);

int level = Log.INFO;
if (msToReceive > 5000)
level = Log.WARN;
@ -381,7 +384,7 @@ public abstract class TransportImpl implements Transport {
_context.profileManager().messageReceived(remoteIdentHash, getStyle(), msToReceive, bytesReceived);
_context.statManager().addRateData("transport.receiveMessageSize", bytesReceived, msToReceive);
}

_context.statManager().addRateData("transport.receiveMessageTime", msToReceive, msToReceive);
if (msToReceive > 1000) {
_context.statManager().addRateData("transport.receiveMessageTimeSlow", msToReceive, msToReceive);
@ -390,7 +393,7 @@ public abstract class TransportImpl implements Transport {
//// this functionality is built into the InNetMessagePool
//String type = inMsg.getClass().getName();
//MessageHistory.getInstance().receiveMessage(type, inMsg.getUniqueId(), inMsg.getMessageExpiration(), remoteIdentHash, true);

if (_listener != null) {
_listener.messageReceived(inMsg, remoteIdent, remoteIdentHash);
} else {
@ -398,9 +401,9 @@ public abstract class TransportImpl implements Transport {
_log.error("WTF! Null listener! this = " + toString(), new Exception("Null listener"));
}
}

/** What addresses are we currently listening to? */
public RouterAddress getCurrentAddress() {
public RouterAddress getCurrentAddress() {
return _currentAddress;
}
/**
@ -415,19 +418,19 @@ public abstract class TransportImpl implements Transport {
if ("SSU".equals(getStyle()))
_context.commSystem().notifyReplaceAddress(address);
}

/** Who to notify on message availability */
public void setListener(TransportEventListener listener) { _listener = listener; }
/** Make this stuff pretty (only used in the old console) */
public void renderStatusHTML(Writer out) throws IOException {}
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException { renderStatusHTML(out); }

public RouterContext getContext() { return _context; }
public short getReachabilityStatus() { return CommSystemFacade.STATUS_UNKNOWN; }
public void recheckReachability() {}
public boolean isBacklogged(Hash dest) { return false; }
public boolean isEstablished(Hash dest) { return false; }

private static final long UNREACHABLE_PERIOD = 5*60*1000;
public boolean isUnreachable(Hash peer) {
long now = _context.clock().now();
@ -485,10 +488,8 @@ public abstract class TransportImpl implements Transport {
* This is NOT reset if the peer contacts us and it is never expired.
*/
public boolean wasUnreachable(Hash peer) {
synchronized (_wasUnreachableEntries) {
if (_wasUnreachableEntries.contains(peer))
return true;
}
if (_wasUnreachableEntries.contains(peer))
return true;
RouterInfo ri = _context.netDb().lookupRouterInfoLocally(peer);
if (ri == null)
return false;
@ -498,20 +499,26 @@ public abstract class TransportImpl implements Transport {
* Maintain the WasUnreachable list
*/
public void markWasUnreachable(Hash peer, boolean yes) {
synchronized (_wasUnreachableEntries) {
if (yes)
_wasUnreachableEntries.add(peer);
else
_wasUnreachableEntries.remove(peer);
}
if (yes)
_wasUnreachableEntries.add(peer);
else
_wasUnreachableEntries.remove(peer);
if (_log.shouldLog(Log.WARN))
_log.warn(this.getStyle() + " setting wasUnreachable to " + yes + " for " + peer);
}
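The pattern in the two hunks above: _wasUnreachableEntries moves from a synchronized HashSet to net.i2p.util.ConcurrentHashSet, so the explicit synchronized blocks around contains/add/remove can be dropped because each individual operation is already thread-safe. A generic sketch of the same migration using only JDK types (this mirrors, rather than reproduces, the i2p utility class; Collections.newSetFromMap is the Java 6 idiom for a ConcurrentHashMap-backed set):

// Sketch: replacing "synchronized (set) { ... }" with a concurrent set.
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class UnreachableTracker<K> {
    // JDK equivalent of net.i2p.util.ConcurrentHashSet:
    private final Set<K> wasUnreachable =
        Collections.newSetFromMap(new ConcurrentHashMap<K, Boolean>(16));

    // Each call is atomic on its own, so no synchronized blocks are needed.
    void mark(K peer, boolean yes) {
        if (yes)
            wasUnreachable.add(peer);
        else
            wasUnreachable.remove(peer);
    }

    boolean wasUnreachable(K peer) {
        return wasUnreachable.contains(peer);
    }
}

Only compound check-then-act sequences would still need external coordination; simple membership tests like these do not.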
public /* static */ void setIP(Hash peer, byte[] ip) {
_IPMap.put(peer, ip);
}

public static byte[] getIP(Hash peer) {
return _IPMap.get(peer);
}

public static boolean isPubliclyRoutable(byte addr[]) {
if (addr.length == 4) {
if ((addr[0]&0xFF) == 127) return false;
if ((addr[0]&0xFF) == 10) return false;
if ((addr[0]&0xFF) == 10) return false;
if ( ((addr[0]&0xFF) == 172) && ((addr[1]&0xFF) >= 16) && ((addr[1]&0xFF) <= 31) ) return false;
if ( ((addr[0]&0xFF) == 192) && ((addr[1]&0xFF) == 168) ) return false;
if ((addr[0]&0xFF) >= 224) return false; // no multicast
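isPubliclyRoutable() rejects loopback (127/8), the RFC 1918 private ranges (10/8, 172.16/12, 192.168/16) and anything at or above 224.0.0.0 (multicast and reserved). A small usage sketch; the sample addresses and expected results follow directly from those rules:

// Sketch: exercising the IPv4 filter above (run with assertions enabled, java -ea).
public class RoutableCheck {
    public static void main(String[] args) {
        byte[] pub   = { (byte)93, (byte)184, (byte)216, (byte)34 };  // public -> true
        byte[] priv  = { (byte)192, (byte)168, (byte)1, (byte)10 };   // RFC 1918 -> false
        byte[] mcast = { (byte)239, (byte)1, (byte)2, (byte)3 };      // multicast -> false
        assert TransportImpl.isPubliclyRoutable(pub);
        assert !TransportImpl.isPubliclyRoutable(priv);
        assert !TransportImpl.isPubliclyRoutable(mcast);
    }
}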
@ -33,7 +33,7 @@ import net.i2p.util.Log;

public class TransportManager implements TransportEventListener {
private Log _log;
private List _transports;
private List<Transport> _transports;
private RouterContext _context;
private UPnPManager _upnpManager;

@ -233,6 +233,19 @@ public class TransportManager implements TransportEventListener {
return true;
}

/**
* IP of the peer from the last connection (in or out, any transport).
* This may be different from that advertised in the netDb,
* as the peer may be hidden, or connect from a different IP, or
* change his netDb later, in an attempt to avoid restrictions.
*
* For blocking purposes, etc. it's worth checking both
* the netDb addresses and this address.
*/
public byte[] getIP(Hash dest) {
return TransportImpl.getIP(dest);
}

Map getAddresses() {
Map rv = new HashMap(_transports.size());
for (int i = 0; i < _transports.size(); i++) {
@ -45,7 +45,7 @@ import net.i2p.util.Log;
public class EstablishState {
private RouterContext _context;
private Log _log;

// bob receives (and alice sends)
private byte _X[];
private byte _hX_xor_bobIdentHash[];
@ -60,28 +60,28 @@ public class EstablishState {
private transient long _tsB;
private transient long _tsA;
private transient byte _e_bobSig[];

/** previously received encrypted block (or the IV) */
private byte _prevEncrypted[];
/** current encrypted block we are reading */
private byte _curEncrypted[];
/**
* next index in _curEncrypted to write to (equals _curEncrypted length if the block is
* ready to decrypt)
*/
private int _curEncryptedOffset;
/** decryption buffer */
private byte _curDecrypted[];

/** bytes received so far */
private int _received;
/** bytes sent so far */
private int _sent;

private byte _extra[];

private DHSessionKeyBuilder _dh;

private NTCPTransport _transport;
private NTCPConnection _con;
private boolean _corrupt;
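These buffers drive a small state machine: incoming bytes accumulate into 16-byte AES blocks, each full block is decrypted using the previous ciphertext block as the IV (CBC done by hand), and the current block then becomes the next IV. A standalone sketch of that loop with the JDK cipher; the real code uses _context.aes() and parses the decrypted block inline, so treat this purely as an illustration:

    import java.nio.ByteBuffer;
    import javax.crypto.Cipher;
    import javax.crypto.spec.IvParameterSpec;
    import javax.crypto.spec.SecretKeySpec;

    /** Illustrative only: accumulate 16-byte blocks from a ByteBuffer and
     *  decrypt each one CBC-style, keying the IV off the previous ciphertext
     *  block exactly as the _prevEncrypted/_curEncrypted fields do. */
    class BlockReader {
        private final SecretKeySpec key;
        private byte[] prevEncrypted;             // previous block (or the IV)
        private final byte[] curEncrypted = new byte[16];
        private int curOffset;

        BlockReader(byte[] keyBytes, byte[] iv) {
            key = new SecretKeySpec(keyBytes, "AES");
            prevEncrypted = iv.clone();
        }

        void receive(ByteBuffer src) throws Exception {
            while (src.hasRemaining()) {
                curEncrypted[curOffset++] = src.get();
                if (curOffset == curEncrypted.length) {
                    Cipher c = Cipher.getInstance("AES/CBC/NoPadding");
                    c.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(prevEncrypted));
                    byte[] decrypted = c.doFinal(curEncrypted);
                    handleBlock(decrypted);               // parse size / ident / sig etc.
                    prevEncrypted = curEncrypted.clone(); // this block is the next IV
                    curOffset = 0;
                }
            }
        }

        void handleBlock(byte[] block) { /* consume 16 decrypted bytes */ }
    }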
@ -92,7 +92,7 @@ public class EstablishState {
private boolean _verified;
private boolean _confirmWritten;
private boolean _failedBySkew;

public EstablishState(RouterContext ctx, NTCPTransport transport, NTCPConnection con) {
_context = ctx;
_log = ctx.logManager().getLog(getClass());
@ -113,15 +113,15 @@ public class EstablishState {
byte hx[] = ctx.sha().calculateHash(_X).getData();
DataHelper.xor(hx, 0, con.getRemotePeer().calculateHash().getData(), 0, _hX_xor_bobIdentHash, 0, hx.length);
}

_prevEncrypted = new byte[16];
_curEncrypted = new byte[16];
_curEncryptedOffset = 0;
_curDecrypted = new byte[16];

_received = 0;
}

/**
* parse the contents of the buffer as part of the handshake. if the
* handshake is completed and there is more data remaining, the buffer is
@ -133,7 +133,7 @@ public class EstablishState {
throw new IllegalStateException(prefix() + "received after completion [corrupt?" + _corrupt + " verified? " + _verified + "] on " + _con);
if (!src.hasRemaining())
return; // nothing to receive

if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"receive " + src);
if (_con.isInbound())
@ -141,15 +141,15 @@ public class EstablishState {
else
receiveOutbound(src);
}

/**
* we have written all of the data required to confirm the connection
* establishment
*/
public boolean confirmWritten() { return _confirmWritten; }

public boolean getFailedBySkew() { return _failedBySkew; }

/** we are Bob, so receive these bytes as part of an inbound connection */
private void receiveInbound(ByteBuffer src) {
if (_log.shouldLog(Log.DEBUG))
@ -178,7 +178,7 @@ public class EstablishState {
if (_dh.getSessionKey() == null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"Enough data for a DH received");

// first verify that Alice knows who she is trying to talk with and that the X
// isn't corrupt
Hash hX = _context.sha().calculateHash(_X);
@ -201,7 +201,7 @@ public class EstablishState {
System.arraycopy(realXor, 16, _prevEncrypted, 0, _prevEncrypted.length);
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"DH session key calculated (" + _dh.getSessionKey().toBase64() + ")");

// now prepare our response: Y+E(H(X+Y)+tsB+padding, sk, Y[239:255])
_Y = _dh.getMyPublicValueBytes();
byte xy[] = new byte[_X.length+_Y.length];
@ -233,7 +233,7 @@ public class EstablishState {
byte write[] = new byte[_Y.length + _e_hXY_tsB.length];
System.arraycopy(_Y, 0, write, 0, _Y.length);
System.arraycopy(_e_hXY_tsB, 0, write, _Y.length, _e_hXY_tsB.length);

// ok, now that is prepared, we want to actually send it, so make sure we are up for writing
_transport.getPumper().wantsWrite(_con, write);
if (!src.hasRemaining()) return;
@ -243,7 +243,7 @@ public class EstablishState {
return;
}
}

// ok, we are onto the encrypted area
while (src.hasRemaining() && !_corrupt) {
if (_log.shouldLog(Log.DEBUG))
@ -256,12 +256,12 @@ public class EstablishState {
_context.aes().decrypt(_curEncrypted, 0, _curDecrypted, 0, _dh.getSessionKey(), _prevEncrypted, 0, _curEncrypted.length);
//if (_log.shouldLog(Log.DEBUG))
// _log.debug(prefix()+"full block read and decrypted: " + Base64.encode(_curDecrypted));

byte swap[] = new byte[16];
_prevEncrypted = _curEncrypted;
_curEncrypted = swap;
_curEncryptedOffset = 0;

if (_aliceIdentSize <= 0) { // we are on the first decrypted block
_aliceIdentSize = (int)DataHelper.fromLong(_curDecrypted, 0, 2);
_sz_aliceIdent_tsA_padding_aliceSigSize = 2 + _aliceIdentSize + 4 + Signature.SIGNATURE_BYTES;
@ -292,8 +292,8 @@ public class EstablishState {
if (!_corrupt && _verified && src.hasRemaining())
prepareExtra(src);
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"verifying size (sz=" + _sz_aliceIdent_tsA_padding_aliceSig.size()
+ " expected=" + _sz_aliceIdent_tsA_padding_aliceSigSize
+ " corrupt=" + _corrupt
+ " verified=" + _verified + " extra=" + (_extra != null ? _extra.length : 0) + ")");
return;
@ -310,11 +310,11 @@ public class EstablishState {
_log.debug(prefix()+"done with the data, not yet complete or corrupt");
}
}

/** we are Alice, so receive these bytes as part of an outbound connection */
private void receiveOutbound(ByteBuffer src) {
if (_log.shouldLog(Log.DEBUG)) _log.debug(prefix()+"Receive outbound " + src + " received=" + _received);

// recv Y+E(H(X+Y)+tsB, sk, Y[239:255])
while (_received < _Y.length && src.hasRemaining()) {
byte c = src.get();
@ -361,7 +361,7 @@ public class EstablishState {
_tsA = _context.clock().now()/1000; // our (Alice's) timestamp in seconds
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"h(X+Y) is correct, tsA-tsB=" + (_tsA-_tsB));

// the skew is not authenticated yet, but it is certainly fatal to
// the establishment, so fail hard if appropriate
long diff = 1000*Math.abs(_tsA-_tsB);
@ -374,7 +374,7 @@ public class EstablishState {
} else if (_log.shouldLog(Log.DEBUG)) {
_log.debug(prefix()+"Clock skew: " + diff + " ms");
}

// now prepare and send our response
// send E(#+Alice.identity+tsA+padding+S(X+Y+Bob.identHash+tsA+tsB), sk, hX_xor_Bob.identHash[16:31])
int sigSize = _X.length+_Y.length+Hash.HASH_LENGTH+4+4;//+12;
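The fail-fast above is plain arithmetic: both timestamps are whole seconds, so their difference is scaled to milliseconds and compared against the router's fudge factor before any more of the handshake is processed. A standalone sketch (the bound here is an illustrative stand-in for Router.CLOCK_FUDGE_FACTOR, whose actual value is defined elsewhere):

    /** Sketch of the skew test; the real bound is Router.CLOCK_FUDGE_FACTOR. */
    class SkewCheck {
        static final long FUDGE_MS = 60 * 1000; // illustrative placeholder only

        /**
         * @param tsA our timestamp, in seconds
         * @param tsB the peer's timestamp, in seconds
         */
        static boolean skewFatal(long tsA, long tsB) {
            long diffMs = 1000 * Math.abs(tsA - tsB);
            return diffMs >= FUDGE_MS; // fatal: fail the establishment hard
        }
    }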
@ -390,11 +390,11 @@ public class EstablishState {
//_context.random().nextBytes(sigPad);
//System.arraycopy(sigPad, 0, preSign, _X.length+_Y.length+Hash.HASH_LENGTH+4+4, padSig);
Signature sig = _context.dsa().sign(preSign, _context.keyManager().getSigningPrivateKey());

//if (_log.shouldLog(Log.DEBUG)) {
// _log.debug(prefix()+"signing " + Base64.encode(preSign));
//}

byte ident[] = _context.router().getRouterInfo().getIdentity().toByteArray();
int min = 2+ident.length+4+Signature.SIGNATURE_BYTES;
int rem = min % 16;
@ -409,10 +409,10 @@ public class EstablishState {
_context.random().nextBytes(pad);
System.arraycopy(pad, 0, preEncrypt, 2+ident.length+4, padding);
System.arraycopy(sig.getData(), 0, preEncrypt, 2+ident.length+4+padding, Signature.SIGNATURE_BYTES);

_prevEncrypted = new byte[preEncrypt.length];
_context.aes().encrypt(preEncrypt, 0, _prevEncrypted, 0, _dh.getSessionKey(), _hX_xor_bobIdentHash, _hX_xor_bobIdentHash.length-16, preEncrypt.length);

if (_log.shouldLog(Log.DEBUG)) {
//_log.debug(prefix() + "unencrypted response to Bob: " + Base64.encode(preEncrypt));
//_log.debug(prefix() + "encrypted response to Bob: " + Base64.encode(_prevEncrypted));
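The padding arithmetic above rounds the plaintext (2-byte size, identity, 4-byte timestamp, signature) up to the 16-byte AES block size. As a worked example under assumed sizes: a 387-byte identity and a 40-byte DSA signature give min = 2+387+4+40 = 433, rem = 433 % 16 = 1, so 15 random pad bytes are appended. The same calculation as a small helper:

    /** Worked sketch of the block-size padding computation above. */
    class PaddingMath {
        static int paddingFor(int identLen, int sigLen) {
            int min = 2 + identLen + 4 + sigLen;  // size + ident + tsA + signature
            int rem = min % 16;
            return (rem == 0) ? 0 : 16 - rem;     // identLen=387, sigLen=40 -> 15
        }
    }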
@ -423,7 +423,7 @@ public class EstablishState {
}
if (_received >= _Y.length + _e_hXY_tsB.length && src.hasRemaining()) {
// we are receiving their confirmation

// recv E(S(X+Y+Alice.identHash+tsA+tsB)+padding, sk, prev)
int off = 0;
if (_e_bobSig == null) {
@ -439,7 +439,7 @@ public class EstablishState {
if (_log.shouldLog(Log.DEBUG)) _log.debug(prefix()+"recv bobSig received=" + _received);
_e_bobSig[off++] = src.get();
_received++;

if (off >= _e_bobSig.length) {
//if (_log.shouldLog(Log.DEBUG))
// _log.debug(prefix() + "received E(S(X+Y+Alice.identHash+tsA+tsB)+padding, sk, prev): " + Base64.encode(_e_bobSig));
@ -449,7 +449,7 @@ public class EstablishState {
byte bobSigData[] = new byte[Signature.SIGNATURE_BYTES];
System.arraycopy(bobSig, 0, bobSigData, 0, Signature.SIGNATURE_BYTES);
Signature sig = new Signature(bobSigData);

byte toVerify[] = new byte[_X.length+_Y.length+Hash.HASH_LENGTH+4+4];
int voff = 0;
System.arraycopy(_X, 0, toVerify, voff, _X.length); voff += _X.length;
@ -457,12 +457,11 @@ public class EstablishState {
System.arraycopy(_context.routerHash().getData(), 0, toVerify, voff, Hash.HASH_LENGTH); voff += Hash.HASH_LENGTH;
DataHelper.toLong(toVerify, voff, 4, _tsA); voff += 4;
DataHelper.toLong(toVerify, voff, 4, _tsB); voff += 4;

_verified = _context.dsa().verifySignature(sig, toVerify, _con.getRemotePeer().getSigningPublicKey());
if (!_verified) {
_context.statManager().addRateData("ntcp.invalidSignature", 1, 0);
fail("Signature was invalid - attempt to spoof " + _con.getRemotePeer().calculateHash().toBase64() + "?");
return;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "signature verified from Bob. done!");
@ -472,18 +471,22 @@ public class EstablishState {
byte nextReadIV[] = new byte[16];
System.arraycopy(_e_bobSig, _e_bobSig.length-16, nextReadIV, 0, nextReadIV.length);
_con.finishOutboundEstablishment(_dh.getSessionKey(), (_tsA-_tsB), nextWriteIV, nextReadIV); // skew in seconds
return;
// if socket gets closed this will be null - prevent NPE
InetAddress ia = _con.getChannel().socket().getInetAddress();
if (ia != null)
_transport.setIP(_con.getRemotePeer().calculateHash(), ia.getAddress());
}
return;
}
}
}
}

/** did the handshake fail for some reason? */
public boolean isCorrupt() { return _err != null; }
/** @return is the handshake complete and valid? */
public boolean isComplete() { return _verified; }

/**
* we are establishing an outbound connection, so prepare ourselves by
* queueing up the write of the first part of the handshake
@ -501,10 +504,10 @@ public class EstablishState {
_log.debug(prefix()+"prepare outbound with received=" + _received);
}
}

/**
* make sure the signatures are correct, and if they are, update the
* NIOConnection with the session key / peer ident / clock skew / iv.
* The NIOConnection itself is responsible for registering with the
* transport
*/
@ -513,10 +516,10 @@ public class EstablishState {
byte b[] = _sz_aliceIdent_tsA_padding_aliceSig.toByteArray();
//if (_log.shouldLog(Log.DEBUG))
// _log.debug(prefix()+"decrypted sz(etc) data: " + Base64.encode(b));

try {
RouterIdentity alice = new RouterIdentity();
int sz = (int)DataHelper.fromLong(b, 0, 2);
int sz = (int)DataHelper.fromLong(b, 0, 2); // TO-DO: Hey zzz... Throws an NPE for me... see below, for my "quick fix", need to find out the real reason
if ( (sz <= 0) || (sz > b.length-2-4-Signature.SIGNATURE_BYTES) ) {
_context.statManager().addRateData("ntcp.invalidInboundSize", sz, 0);
fail("size is invalid", new Exception("size is " + sz));
@ -526,7 +529,7 @@ public class EstablishState {
System.arraycopy(b, 2, aliceData, 0, sz);
alice.fromByteArray(aliceData);
long tsA = DataHelper.fromLong(b, 2+sz, 4);

ByteArrayOutputStream baos = new ByteArrayOutputStream(768);
baos.write(_X);
baos.write(_Y);
@ -534,7 +537,7 @@ public class EstablishState {
baos.write(DataHelper.toLong(4, tsA));
baos.write(DataHelper.toLong(4, _tsB));
//baos.write(b, 2+sz+4, b.length-2-sz-4-Signature.SIGNATURE_BYTES);

byte toVerify[] = baos.toByteArray();
if (_log.shouldLog(Log.DEBUG)) {
_log.debug(prefix()+"checking " + Base64.encode(toVerify, 0, 16));
@ -546,18 +549,24 @@ public class EstablishState {
Signature sig = new Signature(s);
_verified = _context.dsa().verifySignature(sig, toVerify, alice.getSigningPublicKey());
if (_verified) {
// get inet-addr
InetAddress addr = this._con.getChannel().socket().getInetAddress();
byte[] ip = (addr == null) ? null : addr.getAddress();
if (_context.shitlist().isShitlistedForever(alice.calculateHash())) {
if (_log.shouldLog(Log.WARN))
_log.warn("Dropping inbound connection from permanently shitlisted peer: " + alice.calculateHash().toBase64());
// So next time we will not accept the con from this IP,
// rather than doing the whole handshake
_context.blocklist().add(_con.getChannel().socket().getInetAddress().getAddress());
if(ip != null)
_context.blocklist().add(ip);
fail("Peer is shitlisted forever: " + alice.calculateHash().toBase64());
return;
}
if(ip != null)
_transport.setIP(alice.calculateHash(), ip);
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "verification successful for " + _con);

long diff = 1000*Math.abs(tsA-_tsB);
if (diff >= Router.CLOCK_FUDGE_FACTOR) {
_context.statManager().addRateData("ntcp.invalidInboundSkew", diff, 0);
@ -588,9 +597,11 @@ public class EstablishState {
} catch (DataFormatException dfe) {
_context.statManager().addRateData("ntcp.invalidInboundDFE", 1, 0);
fail("Error verifying peer", dfe);
} catch(NullPointerException npe) {
fail("Error verifying peer", npe); // TO-DO: zzz This is that quick-fix. -- Sponge
}
}

private void sendInboundConfirm(RouterIdentity alice, long tsA) {
// send Alice E(S(X+Y+Alice.identHash+tsA+tsB), sk, prev)
byte toSign[] = new byte[256+256+32+4+4];
@ -601,7 +612,7 @@ public class EstablishState {
System.arraycopy(h.getData(), 0, toSign, off, 32); off += 32;
DataHelper.toLong(toSign, off, 4, tsA); off += 4;
DataHelper.toLong(toSign, off, 4, _tsB); off += 4;

Signature sig = _context.dsa().sign(toSign, _context.keyManager().getSigningPrivateKey());
byte preSig[] = new byte[Signature.SIGNATURE_BYTES+8];
byte pad[] = new byte[8];
@ -610,12 +621,12 @@ public class EstablishState {
System.arraycopy(pad, 0, preSig, Signature.SIGNATURE_BYTES, pad.length);
_e_bobSig = new byte[preSig.length];
_context.aes().encrypt(preSig, 0, _e_bobSig, 0, _dh.getSessionKey(), _e_hXY_tsB, _e_hXY_tsB.length-16, _e_bobSig.length);

if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "Sending encrypted inbound confirmation");
_transport.getPumper().wantsWrite(_con, _e_bobSig);
}

/** anything left over in the byte buffer after verification is extra */
private void prepareExtra(ByteBuffer buf) {
int remaining = buf.remaining();
@ -627,13 +638,13 @@ public class EstablishState {
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "prepare extra " + remaining + " (total received: " + _received + ")");
}

/**
* if complete, this will contain any bytes received as part of the
* handshake that were after the actual handshake. This may return null.
*/
public byte[] getExtraBytes() { return _extra; }

private void fail(String reason) { fail(reason, null); }
private void fail(String reason, Exception e) { fail(reason, e, false); }
private void fail(String reason, Exception e, boolean bySkew) {
@ -644,11 +655,12 @@ public class EstablishState {
if (_log.shouldLog(Log.WARN))
_log.warn(prefix()+"Failed to establish: " + _err, e);
}

public String getError() { return _err; }
public Exception getException() { return _e; }

private String prefix() { return toString(); }
@Override
public String toString() {
StringBuffer buf = new StringBuffer(64);
buf.append("est").append(System.identityHashCode(this));
@ -660,7 +672,7 @@ public class EstablishState {
buf.append(": ");
return buf.toString();
}

/**
* a check info connection will receive 256 bytes containing:
* - 32 bytes of uninterpreted, ignored data
@ -694,7 +706,7 @@ public class EstablishState {
off += 4;
long skewSeconds = (ctx.clock().now()/1000)-now;
if (log.shouldLog(Log.INFO))
log.info("Check info received: our IP: " + ourIP + " our port: " + port
+ " skew: " + skewSeconds + " s");
} catch (UnknownHostException uhe) {
// ipSize is invalid
@ -708,7 +720,7 @@ public class EstablishState {
return false;
}
}

public static void checkHost(String args[]) {
if (args.length != 3) {
System.err.println("Usage: EstablishState ipOrHostname portNum peerHashBase64");
@ -737,7 +749,7 @@ public class EstablishState {
Hash h = ctx.sha().calculateHash(toSend, 32, toSend.length-32-32);
DataHelper.xor(peer, 0, h.getData(), 0, toSend, toSend.length-32, peer.length);
System.out.println("check hash: " + h.toBase64());

out.write(toSend);
out.flush();
try { Thread.sleep(1000); } catch (InterruptedException ie) {}
@ -746,7 +758,7 @@ public class EstablishState {
e.printStackTrace();
}
}

public static void main(String args[]) {
if (args.length == 3) {
checkHost(args);
@ -771,7 +783,7 @@ public class EstablishState {
out.write(hx_xor_bih);
out.flush();
// DONE SENDING X+(H(X) xor Bob.identHash)----------------------------->

// NOW READ Y+E(H(X+Y)+tsB+padding, sk, Y[239:255])
InputStream in = s.getInputStream();
byte toRead[] = new byte[256+(32+4+12)];
@ -799,9 +811,9 @@ public class EstablishState {
System.out.println("encrypted H(X+Y)+tsB+padding: " + Base64.encode(toRead, Y.length, toRead.length-Y.length));
System.out.println("unencrypted H(X+Y)+tsB+padding: " + Base64.encode(decrypted));
long tsB = DataHelper.fromLong(decrypted, 32, 4);

//try { Thread.sleep(40*1000); } catch (InterruptedException ie) {}

RouterIdentity alice = new RouterIdentity();
Object k[] = ctx.keyGenerator().generatePKIKeypair();
PublicKey pub = (PublicKey)k[0];
@ -812,16 +824,16 @@ public class EstablishState {
alice.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
alice.setPublicKey(pub);
alice.setSigningPublicKey(spub);

// SEND E(#+Alice.identity+tsA+padding+S(X+Y+Bob.identHash+tsA+tsB+padding), sk, hX_xor_Bob.identHash[16:31])--->

ByteArrayOutputStream baos = new ByteArrayOutputStream(512);
byte aliceb[] = alice.toByteArray();
long tsA = ctx.clock().now()/1000l;
baos.write(DataHelper.toLong(2, aliceb.length));
baos.write(aliceb);
baos.write(DataHelper.toLong(4, tsA));

int base = baos.size() + Signature.SIGNATURE_BYTES;
int rem = base % 16;
int padding = 0;
@ -831,7 +843,7 @@ public class EstablishState {
ctx.random().nextBytes(pad);
baos.write(pad);
base += padding;

ByteArrayOutputStream sbaos = new ByteArrayOutputStream(512);
sbaos.write(X);
sbaos.write(Y);
@ -841,21 +853,21 @@ public class EstablishState {
//sbaos.write(pad);
Signature sig = ctx.dsa().sign(sbaos.toByteArray(), spriv);
baos.write(sig.toByteArray());

byte unencrypted[] = baos.toByteArray();
byte toWrite[] = new byte[unencrypted.length];
System.out.println("unencrypted.length = " + unencrypted.length + " alice.size = " + aliceb.length + " padding = " + padding + " base = " + base);
ctx.aes().encrypt(unencrypted, 0, toWrite, 0, dh.getSessionKey(), hx_xor_bih, 16, unencrypted.length);

out.write(toWrite);
out.flush();

System.out.println("unencrypted: " + Base64.encode(unencrypted));
System.out.println("encrypted: " + Base64.encode(toWrite));
System.out.println("Local peer: " + alice.calculateHash().toBase64());

// now check bob's signature

SigningPublicKey bobPubKey = null;
try {
RouterInfo info = new RouterInfo();
@ -865,9 +877,9 @@ public class EstablishState {
e.printStackTrace();
return;
}

System.out.println("Reading in bob's sig");

byte bobRead[] = new byte[48];
read = 0;
while (read < bobRead.length) {
@ -883,7 +895,7 @@ public class EstablishState {
byte bobSigData[] = new byte[Signature.SIGNATURE_BYTES];
System.arraycopy(preSig, 0, bobSigData, 0, Signature.SIGNATURE_BYTES); // ignore the padding
System.out.println("Bob's sig: " + Base64.encode(bobSigData));

byte signed[] = new byte[256+256+32+4+4];
int off = 0;
System.arraycopy(X, 0, signed, off, 256); off += 256;
@ -895,18 +907,18 @@ public class EstablishState {

Signature bobSig = new Signature(bobSigData);
boolean ok = ctx.dsa().verifySignature(bobSig, signed, bobPubKey);

System.out.println("bob's sig matches? " + ok);

try { Thread.sleep(5*1000); } catch (InterruptedException ie) {}
byte fakeI2NPbuf[] = new byte[128];
ctx.random().nextBytes(fakeI2NPbuf);
out.write(fakeI2NPbuf);
out.flush();

try { Thread.sleep(30*1000); } catch (InterruptedException ie) {}
s.close();
} catch (Exception e) {
e.printStackTrace();
}
}
@ -66,22 +66,25 @@ public class EventPumper implements Runnable {
public void startPumping() {
if (_log.shouldLog(Log.INFO))
_log.info("Starting pumper");
_alive = true;
_wantsRead = new ArrayList(16);
_wantsWrite = new ArrayList(4);
_wantsRegister = new ArrayList(1);
_wantsConRegister = new ArrayList(4);
try {
_selector = Selector.open();
_alive = true;
new I2PThread(this, "NTCP Pumper", true).start();
} catch (IOException ioe) {
_log.error("Error opening the selector", ioe);
_log.log(Log.CRIT, "Error opening the NTCP selector", ioe);
} catch (java.lang.InternalError jlie) {
// "unable to get address of epoll functions, pre-2.6 kernel?"
_log.log(Log.CRIT, "Error opening the NTCP selector", jlie);
}
new I2PThread(this, "NTCP Pumper", true).start();
}

public void stopPumping() {
_alive = false;
if (_selector.isOpen())
if (_selector != null && _selector.isOpen())
_selector.wakeup();
}
@ -104,6 +104,15 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {

private static final int META_FREQUENCY = 10*60*1000;
private static final int INFO_FREQUENCY = 6*60*60*1000;
/**
* Why this is 16K, and where it is documented, is a good question.
* We claim we can do 32K datagrams so this is a problem.
* Needs to be fixed. But SSU can handle it?
* In the meantime, don't let the transport bid on big messages.
*/
public static final int BUFFER_SIZE = 16*1024;
/** 2 bytes for length and 4 for CRC */
public static final int MAX_MSG_SIZE = BUFFER_SIZE - (2 + 4);

/**
* Create an inbound connected (though not established) NTCP connection
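Concretely, a BUFFER_SIZE of 16*1024 makes MAX_MSG_SIZE 16384 - 6 = 16378 bytes, and the bid() logic shown further down refuses anything larger so SSU carries it instead. The guard, reduced to a sketch:

    /** Sketch of the size guard bid() applies before offering NTCP. */
    class NtcpSizeGuard {
        static final int BUFFER_SIZE = 16 * 1024;
        static final int MAX_MSG_SIZE = BUFFER_SIZE - (2 + 4); // 16378

        static boolean canCarry(long dataSize) {
            return dataSize <= MAX_MSG_SIZE; // larger messages: let SSU deal with it
        }
    }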
@ -36,7 +36,7 @@ public class NTCPTransport extends TransportImpl {
private SharedBid _fastBid;
private SharedBid _slowBid;
private SharedBid _transientFail;
private Object _conLock;
private final Object _conLock;
private Map _conByIdent;
private NTCPAddress _myAddress;
private EventPumper _pumper;
@ -46,14 +46,14 @@ public class NTCPTransport extends TransportImpl {
* list of NTCPConnection of connections not yet established that we
* want to remove on establishment or close on timeout
*/
private List _establishing;
private final List _establishing;

private List _sent;
private NTCPSendFinisher _finisher;

public NTCPTransport(RouterContext ctx) {
super(ctx);

_log = ctx.logManager().getLog(getClass());

_context.statManager().createRateStat("ntcp.sendTime", "Total message lifetime when sent completely", "ntcp", new long[] { 60*1000, 10*60*1000 });
@ -105,6 +105,7 @@ public class NTCPTransport extends TransportImpl {
_context.statManager().createRateStat("ntcp.outboundEstablishFailed", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("ntcp.outboundFailedIOEImmediate", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("ntcp.invalidOutboundSkew", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("ntcp.noBidTooLargeI2NP", "send size", "ntcp", new long[] { 60*60*1000 });
_context.statManager().createRateStat("ntcp.prepBufCache", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("ntcp.queuedRecv", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("ntcp.read", "", "ntcp", new long[] { 60*1000, 10*60*1000 });
@ -121,19 +122,19 @@ public class NTCPTransport extends TransportImpl {
_establishing = new ArrayList(4);
_conLock = new Object();
_conByIdent = new HashMap(64);

_sent = new ArrayList(4);
_finisher = new NTCPSendFinisher(ctx, this);

_pumper = new EventPumper(ctx, this);
_reader = new Reader(ctx);
_writer = new net.i2p.router.transport.ntcp.Writer(ctx);

_fastBid = new SharedBid(25); // best
_slowBid = new SharedBid(70); // better than ssu unestablished, but not better than ssu established
_transientFail = new SharedBid(TransportBid.TRANSIENT_FAIL);
}

void inboundEstablished(NTCPConnection con) {
_context.statManager().addRateData("ntcp.inboundEstablished", 1, 0);
markReachable(con.getRemotePeer().calculateHash(), true);
@ -149,7 +150,7 @@ public class NTCPTransport extends TransportImpl {
old.close();
}
}

protected void outboundMessageReady() {
OutNetMessage msg = getNextMessage();
if (msg != null) {
@ -218,7 +219,7 @@ public class NTCPTransport extends TransportImpl {
}
con.enqueueInfoMessage(); // enqueues a netDb store of our own info
con.send(msg); // doesn't do anything yet, just enqueues it

try {
SocketChannel channel = SocketChannel.open();
con.setChannel(channel);
@ -236,11 +237,19 @@ public class NTCPTransport extends TransportImpl {
*/
}
}

@Override
public void afterSend(OutNetMessage msg, boolean sendSuccessful, boolean allowRequeue, long msToSend) {
super.afterSend(msg, sendSuccessful, allowRequeue, msToSend);
}
public TransportBid bid(RouterInfo toAddress, long dataSize) {
if (!isAlive())
return null;
if (dataSize > NTCPConnection.MAX_MSG_SIZE) {
// let SSU deal with it
_context.statManager().addRateData("ntcp.noBidTooLargeI2NP", dataSize, 0);
return null;
}
Hash peer = toAddress.getIdentity().calculateHash();
if (_context.shitlist().isShitlisted(peer, STYLE)) {
// we aren't shitlisted in general (since we are trying to get a bid), but we have
@ -251,7 +260,7 @@ public class NTCPTransport extends TransportImpl {
_context.statManager().addRateData("ntcp.attemptUnreachablePeer", 1, 0);
return null;
}

boolean established = isEstablished(toAddress.getIdentity());
if (established) { // should we check the queue size? nah, if its valid, use it
if (_log.shouldLog(Log.DEBUG))
@ -259,7 +268,7 @@ public class NTCPTransport extends TransportImpl {
return _fastBid;
}
RouterAddress addr = toAddress.getTargetAddress(STYLE);

if (addr == null) {
markUnreachable(peer);
_context.statManager().addRateData("ntcp.bidRejectedNoNTCPAddress", 1, 0);
@ -286,25 +295,26 @@ public class NTCPTransport extends TransportImpl {
return null;
}
}

if (!allowConnection()) {
if (_log.shouldLog(Log.WARN))
_log.warn("no bid when trying to send to " + toAddress.getIdentity().calculateHash().toBase64() + ", max connection limit reached");
return _transientFail;
}

//if ( (_myAddress != null) && (_myAddress.equals(addr)) )
// return null; // dont talk to yourself

if (_log.shouldLog(Log.DEBUG))
_log.debug("slow bid when trying to send to " + toAddress.getIdentity().calculateHash().toBase64());
return _slowBid;
}
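Taken together the bids form a priority scheme: null when NTCP declines outright (message too large, no address, shitlisted), 25 for an established connection, 70 otherwise, and TRANSIENT_FAIL at the connection cap; the comm system can then take the lowest latency across all transports. A sketch of that final selection, with the list handling assumed for illustration (getLatencyMs() is the real TransportBid accessor):

    import java.util.List;

    /** Illustrative only: choose the cheapest non-null bid across transports. */
    class BidSelector {
        static TransportBid cheapest(List<TransportBid> bids) {
            TransportBid best = null;
            for (TransportBid bid : bids) {
                if (bid == null)
                    continue; // that transport declined to bid
                if (best == null || bid.getLatencyMs() < best.getLatencyMs())
                    best = bid;
            }
            return best; // established NTCP (25) beats unestablished (70)
        }
    }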
public boolean allowConnection() {
return countActivePeers() < getMaxConnections();
}

@Override
public boolean haveCapacity() {
return countActivePeers() < getMaxConnections() * 4 / 5;
}
@ -315,21 +325,23 @@ public class NTCPTransport extends TransportImpl {
private boolean isEstablished(RouterIdentity peer) {
return isEstablished(peer.calculateHash());
}

@Override
public boolean isEstablished(Hash dest) {
synchronized (_conLock) {
NTCPConnection con = (NTCPConnection)_conByIdent.get(dest);
return (con != null) && con.isEstablished() && !con.isClosed();
}
}

@Override
public boolean isBacklogged(Hash dest) {
synchronized (_conLock) {
NTCPConnection con = (NTCPConnection)_conByIdent.get(dest);
return (con != null) && con.isEstablished() && con.tooBacklogged();
}
}

void removeCon(NTCPConnection con) {
NTCPConnection removed = null;
synchronized (_conLock) {
@ -344,15 +356,17 @@ public class NTCPTransport extends TransportImpl {
removed.close();
}
}

/**
* How many peers can we talk to right now?
*
*/
@Override
public int countActivePeers() { synchronized (_conLock) { return _conByIdent.size(); } }
/**
* How many peers are we actively sending messages to (this minute)
*/
@Override
public int countActiveSendPeers() {
int active = 0;
synchronized (_conLock) {
@ -364,11 +378,12 @@ public class NTCPTransport extends TransportImpl {
}
return active;
}

/**
* Return our peer clock skews on this transport.
* Vector composed of Long, each element representing a peer skew in seconds.
*/
@Override
public Vector getClockSkews() {
Vector peers = new Vector();
@ -386,18 +401,18 @@ public class NTCPTransport extends TransportImpl {
_log.debug("NTCP transport returning " + skews.size() + " peer clock skews.");
return skews;
}

private static final int NUM_CONCURRENT_READERS = 3;
private static final int NUM_CONCURRENT_WRITERS = 3;

public RouterAddress startListening() {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Starting ntcp transport listening");
_finisher.start();
_pumper.startPumping();

_reader.startReading(NUM_CONCURRENT_READERS);
_writer.startWriting(NUM_CONCURRENT_WRITERS);

configureLocalAddress();
return bindAddress();
}
@ -406,10 +421,10 @@ public class NTCPTransport extends TransportImpl {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Restarting ntcp transport listening");
_finisher.start();
_pumper.startPumping();

_reader.startReading(NUM_CONCURRENT_READERS);
_writer.startWriting(NUM_CONCURRENT_WRITERS);

_myAddress = new NTCPAddress(addr);
return bindAddress();
}
@ -440,7 +455,7 @@ public class NTCPTransport extends TransportImpl {
if (_log.shouldLog(Log.INFO))
_log.info("Outbound NTCP connections only - no listener configured");
}

if (_myAddress != null) {
RouterAddress rv = _myAddress.toRouterAddress();
if (rv != null)
@ -450,12 +465,12 @@ public class NTCPTransport extends TransportImpl {
return null;
}
}

Reader getReader() { return _reader; }
net.i2p.router.transport.ntcp.Writer getWriter() { return _writer; }
public String getStyle() { return STYLE; }
EventPumper getPumper() { return _pumper; }

/**
* how long from initial connection attempt (accept() or connect()) until
* the con must be established to avoid premature close()ing
@ -496,9 +511,9 @@ public class NTCPTransport extends TransportImpl {
if ( (expired != null) && (expired.size() > 0) )
_context.statManager().addRateData("ntcp.outboundEstablishFailed", expired.size(), 0);
}

//private boolean bindAllInterfaces() { return true; }

private void configureLocalAddress() {
RouterContext ctx = getContext();
if (ctx == null) {
@ -523,7 +538,7 @@ public class NTCPTransport extends TransportImpl {
}
}
}

/**
* This doesn't (completely) block, caller should check isAlive()
* before calling startListening() or restartListening()
@ -547,8 +562,9 @@ public class NTCPTransport extends TransportImpl {
replaceAddress(null);
}
public static final String STYLE = "NTCP";

public void renderStatusHTML(java.io.Writer out, int sortFlags) throws IOException {}
@Override
public void renderStatusHTML(java.io.Writer out, String urlBase, int sortFlags) throws IOException {
TreeSet peers = new TreeSet(getComparator(sortFlags));
synchronized (_conLock) {
@ -569,7 +585,7 @@ public class NTCPTransport extends TransportImpl {
long totalUptime = 0;
long totalSend = 0;
long totalRecv = 0;

StringBuffer buf = new StringBuffer(512);
buf.append("<b id=\"ntcpcon\">NTCP connections: ").append(peers.size());
buf.append(" limit: ").append(getMaxConnections());
@ -593,7 +609,10 @@ public class NTCPTransport extends TransportImpl {
for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
NTCPConnection con = (NTCPConnection)iter.next();
String name = con.getRemotePeer().calculateHash().toBase64().substring(0,6);
buf.append("<tr><td><code><a href=\"netdb.jsp?r=").append(name).append("\">").append(name);
buf.append("<tr><td><code><a href=\"netdb.jsp?r=").append(name).append("\">").append(name).append("</a>");
//byte[] ip = getIP(con.getRemotePeer().calculateHash());
//if (ip != null)
// buf.append(' ').append(_context.blocklist().toStr(ip));
buf.append("</code></td><td align=\"center\"><code>");
if (con.isInbound())
buf.append("in");
@ -657,19 +676,19 @@ public class NTCPTransport extends TransportImpl {
buf.append("</td><td>&nbsp;</td><td>&nbsp;</td><td>&nbsp;");
buf.append("</td></tr>\n");
}

buf.append("</table>\n");
buf.append("Peers currently reading I2NP messages: ").append(readingPeers).append("<br />\n");
buf.append("Peers currently writing I2NP messages: ").append(writingPeers).append("<br />\n");
out.write(buf.toString());
buf.setLength(0);
}

private static NumberFormat _rateFmt = new DecimalFormat("#,#0.00");

private static final NumberFormat _rateFmt = new DecimalFormat("#,#0.00");
private static String formatRate(float rate) {
synchronized (_rateFmt) { return _rateFmt.format(rate); }
}
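The synchronized block in formatRate() exists because java.text.DecimalFormat is not safe for concurrent use; making the field final (as the change does) fixes its publication but not simultaneous format() calls. A lock-free alternative, offered only as a sketch of the trade-off, keeps one formatter per thread:

    import java.text.DecimalFormat;
    import java.text.NumberFormat;

    /** Sketch: per-thread formatter instead of a synchronized shared one. */
    class RateFormatter {
        private static final ThreadLocal<NumberFormat> FMT =
            new ThreadLocal<NumberFormat>() {
                @Override protected NumberFormat initialValue() {
                    return new DecimalFormat("#,#0.00");
                }
            };

        static String formatRate(float rate) {
            return FMT.get().format(rate); // no lock; each thread owns its instance
        }
    }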
private Comparator getComparator(int sortFlags) {
Comparator rv = null;
switch (Math.abs(sortFlags)) {
@ -693,7 +712,7 @@ public class NTCPTransport extends TransportImpl {
}
private static class PeerComparator implements Comparator {
public int compare(Object lhs, Object rhs) {
if ( (lhs == null) || (rhs == null) || !(lhs instanceof NTCPConnection) || !(rhs instanceof NTCPConnection))
throw new IllegalArgumentException("rhs = " + rhs + " lhs = " + lhs);
return compare((NTCPConnection)lhs, (NTCPConnection)rhs);
}
@ -702,13 +721,15 @@ public class NTCPTransport extends TransportImpl {
return l.getRemotePeer().calculateHash().toBase64().compareTo(r.getRemotePeer().calculateHash().toBase64());
}
}

/**
* Cache the bid to reduce object churn
*/
private class SharedBid extends TransportBid {
public SharedBid(int ms) { super(); setLatencyMs(ms); }
@Override
public Transport getTransport() { return NTCPTransport.this; }
@Override
public String toString() { return "NTCP bid @ " + getLatencyMs(); }
}
}
@ -37,15 +37,15 @@ public class EstablishmentManager {
private UDPTransport _transport;
private PacketBuilder _builder;
/** map of RemoteHostId to InboundEstablishState */
private Map _inboundStates;
private final Map _inboundStates;
/** map of RemoteHostId to OutboundEstablishState */
private Map _outboundStates;
private final Map _outboundStates;
/** map of RemoteHostId to List of OutNetMessage for messages exceeding capacity */
private Map _queuedOutbound;
private final Map _queuedOutbound;
/** map of nonce (Long) to OutboundEstablishState */
private Map _liveIntroductions;
private final Map _liveIntroductions;
private boolean _alive;
private Object _activityLock;
private final Object _activityLock;
private int _activity;

private static final int DEFAULT_MAX_CONCURRENT_ESTABLISH = 10;
@ -450,6 +450,7 @@ public class EstablishmentManager {
_transport.addRemotePeerState(peer);

_transport.inboundConnectionReceived();
_transport.setIP(remote.calculateHash(), state.getSentIP());

_context.statManager().addRateData("udp.inboundEstablishTime", state.getLifetime(), 0);
sendInboundComplete(peer);
@ -531,6 +532,7 @@ public class EstablishmentManager {

_transport.addRemotePeerState(peer);
_transport.setIP(remote.calculateHash(), state.getSentIP());

_context.statManager().addRateData("udp.outboundEstablishTime", state.getLifetime(), 0);
sendOurInfo(peer, false);
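Making these references final guarantees safe publication, while the existing callers keep synchronizing on the maps themselves for compound operations. If that external locking were ever dropped, the natural next step would be a concurrent map; a sketch of that direction (the generic typing is an assumption, the patch itself keeps raw types):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    /** Sketch only; not what the patch does. K/V stand in for types like
     *  RemoteHostId and InboundEstablishState. */
    class StateTable<K, V> {
        private final ConcurrentMap<K, V> states = new ConcurrentHashMap<K, V>();

        V lookup(K id) { return states.get(id); }

        /** Atomic check-then-act, replacing a synchronized get/put pair. */
        V addIfAbsent(K id, V state) {
            V old = states.putIfAbsent(id, state);
            return (old == null) ? state : old;
        }
    }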
@ -12,35 +12,35 @@ import net.i2p.util.Log;

/**
* Coordinate the outbound fragments and select the next one to be built.
* This pool contains messages we are actively trying to send, essentially
* doing a round robin across each message to send one fragment, as implemented
* in {@link #getNextVolley()}. This also honors per-peer throttling, taking
* note of each peer's allocations. If a message has each of its fragments
* sent more than a certain number of times, it is failed out. In addition,
* this instance also receives notification of message ACKs from the
* {@link InboundMessageFragments}, signaling that we can stop sending a
* message.
*
*/
public class OutboundMessageFragments {
private RouterContext _context;
private Log _log;
private UDPTransport _transport;
private ActiveThrottle _throttle;
private ActiveThrottle _throttle; // LINT not used ??
/** peers we are actively sending messages to */
private List _activePeers;
private final List _activePeers;
private boolean _alive;
/** which peer should we build the next packet out of? */
private int _nextPeer;
private PacketBuilder _builder;
/** if we can handle more messages explicitly, set this to true */
private boolean _allowExcess;
private volatile long _packetsRetransmitted;

private static final int MAX_ACTIVE = 64;
private boolean _allowExcess; // LINT not used??
private volatile long _packetsRetransmitted; // LINT not used??

// private static final int MAX_ACTIVE = 64; // not used.
// don't send a packet more than 10 times
static final int MAX_VOLLEYS = 10;

public OutboundMessageFragments(RouterContext ctx, UDPTransport transport, ActiveThrottle throttle) {
_context = ctx;
_log = ctx.logManager().getLog(OutboundMessageFragments.class);
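The class javadoc above pins down the scheduling policy: walk the active peers round-robin from _nextPeer, sending one fragment per message per pass. A toy model of just that rotation, leaving out the blocking, throttling and expiry the real getNextVolley() also does:

    import java.util.List;

    /** Toy model of the _nextPeer round robin; P stands in for PeerState. */
    class RoundRobin<P> {
        private int nextPeer;

        /** @return the peer to build the next fragment for, or null if none */
        P selectNext(List<P> activePeers) {
            int size = activePeers.size();
            if (size <= 0)
                return null;
            nextPeer %= size;           // the list may have shrunk since last pass
            P peer = activePeers.get(nextPeer);
            nextPeer++;                 // resume after this peer on the next call
            return peer;
        }
    }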
@ -70,7 +70,7 @@ public class OutboundMessageFragments {
_context.statManager().createRateStat("udp.sendCycleTime", "How long it takes to cycle through all of the active messages?", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("udp.sendCycleTimeSlow", "How long it takes to cycle through all of the active messages, when its going slowly?", "udp", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
}

public void startup() { _alive = true; }
public void shutdown() {
_alive = false;
@ -87,7 +87,7 @@ public class OutboundMessageFragments {
_activePeers.notifyAll();
}
}

/**
* Block until we allow more messages to be admitted to the active
* pool. This is called by the {@link OutboundRefiller}
@ -95,11 +95,11 @@ public class OutboundMessageFragments {
* @return true if more messages are allowed
*/
public boolean waitForMoreAllowed() {
// test without choking.
// perhaps this should check the lifetime of the first activeMessage?
if (true) return true;
/*

long start = _context.clock().now();
int numActive = 0;
int maxActive = Math.max(_transport.countActivePeers(), MAX_ACTIVE);
@ -123,7 +123,7 @@ public class OutboundMessageFragments {
*/
return false;
}

/**
* Add a new message to the active pool
*
@ -133,7 +133,7 @@ public class OutboundMessageFragments {
RouterInfo target = msg.getTarget();
if ( (msgBody == null) || (target == null) )
return;

// todo: make sure the outNetMessage is initialized once and only once
OutboundMessageState state = new OutboundMessageState(_context);
boolean ok = state.initialize(msg, msgBody);
@ -164,9 +164,9 @@ public class OutboundMessageFragments {
}
//finishMessages();
}

/**
* short circuit the OutNetMessage, letting us send the establish
* complete message reliably
*/
public void add(OutboundMessageState state) {
@ -228,11 +228,11 @@ public class OutboundMessageFragments {
rv += remaining;
}
}

private long _lastCycleTime = System.currentTimeMillis();

/**
* Fetch all the packets for a message volley, blocking until there is a
* message which can be fully transmitted (or the transport is shut down).
* The returned array may be sparse, with null packets taking the place of
* already ACKed fragments.
@ -270,7 +270,7 @@ public class OutboundMessageFragments {
}
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Done looping, next peer we are sending for: " +
(peer != null ? peer.getRemotePeer().toBase64() : "none"));
if (state == null) {
if (_log.shouldLog(Log.DEBUG))
@ -291,10 +291,10 @@ public class OutboundMessageFragments {
_log.debug("Woken up while waiting");
}
}

if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending " + state);

UDPPacket packets[] = preparePackets(state, peer);
if ( (state != null) && (state.getMessage() != null) ) {
int valid = 0;
@ -303,21 +303,21 @@ public class OutboundMessageFragments {
valid++;
/*
state.getMessage().timestamp("sending a volley of " + valid
+ " lastReceived: "
+ (_context.clock().now() - peer.getLastReceiveTime())
+ " lastSentFully: "
+ (_context.clock().now() - peer.getLastSendFullyTime()));
*/
}
return packets;
}

private UDPPacket[] preparePackets(OutboundMessageState state, PeerState peer) {
if ( (state != null) && (peer != null) ) {
int fragments = state.getFragmentCount();
if (fragments < 0)
return null;

// ok, simplest possible thing is to always tack on the bitfields if
List msgIds = peer.getCurrentFullACKs();
if (msgIds == null) msgIds = new ArrayList();
@ -353,7 +353,7 @@ public class OutboundMessageFragments {
}
if (sparseCount > 0)
remaining.clear();

int piggybackedAck = 0;
if (msgIds.size() != remaining.size()) {
for (int i = 0; i < msgIds.size(); i++) {
@ -364,7 +364,7 @@ public class OutboundMessageFragments {
}
}
}

if (sparseCount > 0)
_context.statManager().addRateData("udp.sendSparse", sparseCount, state.getLifetime());
if (piggybackedAck > 0)
@ -390,10 +390,10 @@ public class OutboundMessageFragments {
return null;
}
}

/**
* We received an ACK of the given messageId from the given peer, so if it
* is still unacked, mark it as complete.
*
* @return fragments acked
*/
@ -409,7 +409,7 @@ public class OutboundMessageFragments {
return 0;
}
}

public void acked(ACKBitfield bitfield, Hash ackedBy) {
PeerState peer = _transport.getPeerState(ackedBy);
if (peer != null) {
@ -421,7 +421,7 @@ public class OutboundMessageFragments {
_log.debug("partial acked [" + bitfield + "] by an unknown remote peer? " + ackedBy.toBase64());
}
}

public interface ActiveThrottle {
public void choke(Hash peer);
public void unchoke(Hash peer);
@ -1767,6 +1767,9 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
buf.append(" [shitlisted]");
appended = true;
}
//byte[] ip = getIP(peer.getRemotePeer());
//if (ip != null)
// buf.append(' ').append(_context.blocklist().toStr(ip));
buf.append("</code></td>");

long idleIn = (now-peer.getLastReceiveTime())/1000;
@ -74,6 +74,12 @@ public class FragmentHandler {
int padding = 0;
while (preprocessed[offset] != (byte)0x00) {
offset++; // skip the padding
// AIOOBE http://forum.i2p/viewtopic.php?t=3187
if (offset >= TrivialPreprocessor.PREPROCESSED_SIZE) {
_cache.release(new ByteArray(preprocessed));
_context.statManager().addRateData("tunnel.corruptMessage", 1, 1);
return;
}
padding++;
}
offset++; // skip the final 0x00, terminating the padding
@ -387,8 +393,8 @@ public class FragmentHandler {
_log.error("Error receiving fragmented message (corrupt?): " + stringified, ioe);
} catch (I2NPMessageException ime) {
if (stringified == null) stringified = msg.toString();
if (_log.shouldLog(Log.ERROR))
_log.error("Error receiving fragmented message (corrupt?): " + stringified, ime);
if (_log.shouldLog(Log.WARN))
_log.warn("Error receiving fragmented message (corrupt?): " + stringified, ime);
}
}
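The added bounds check matters because the scan trusts a 0x00 terminator inside a fixed-size preprocessed block; corrupt input with no terminator would previously walk off the end of the array and throw the ArrayIndexOutOfBoundsException referenced in the comment. The safe-scan shape, as a standalone sketch:

    /** Standalone sketch of the bounded padding scan. Returns the index just
     *  past the terminating 0x00, or -1 if the block is corrupt. */
    class PaddingScan {
        static int skipPadding(byte[] preprocessed, int offset, int limit) {
            while (preprocessed[offset] != (byte) 0x00) {
                offset++;
                if (offset >= limit)
                    return -1;   // unterminated padding: drop, don't throw
            }
            return offset + 1;   // position after the terminator
        }
    }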
@ -78,13 +78,13 @@ public class FragmentedMessage {
return false;
}
if (length <= 0) {
if (_log.shouldLog(Log.ERROR))
_log.error("Length is impossible (" + length + ") for messageId " + messageId);
if (_log.shouldLog(Log.WARN))
_log.warn("Length is impossible (" + length + ") for messageId " + messageId);
return false;
}
if (offset + length > payload.length) {
if (_log.shouldLog(Log.ERROR))
_log.error("Length is impossible (" + length + "/" + offset + " out of " + payload.length + ") for messageId " + messageId);
if (_log.shouldLog(Log.WARN))
_log.warn("Length is impossible (" + length + "/" + offset + " out of " + payload.length + ") for messageId " + messageId);
return false;
}
if (_log.shouldLog(Log.DEBUG))
@ -131,13 +131,13 @@ public class FragmentedMessage {
return false;
}
if (length <= 0) {
if (_log.shouldLog(Log.ERROR))
_log.error("Length is impossible (" + length + ") for messageId " + messageId);
if (_log.shouldLog(Log.WARN))
_log.warn("Length is impossible (" + length + ") for messageId " + messageId);
return false;
}
if (offset + length > payload.length) {
if (_log.shouldLog(Log.ERROR))
_log.error("Length is impossible (" + length + "/" + offset + " out of " + payload.length + ") for messageId " + messageId);
if (_log.shouldLog(Log.WARN))
_log.warn("Length is impossible (" + length + "/" + offset + " out of " + payload.length + ") for messageId " + messageId);
return false;
}
if (_log.shouldLog(Log.DEBUG))
@ -37,12 +37,12 @@ class BuildExecutor implements Runnable {
_currentlyBuilding = new ArrayList(10);
_context.statManager().createRateStat("tunnel.concurrentBuilds", "How many builds are going at once", "Tunnels", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.concurrentBuildsLagged", "How many builds are going at once when we reject further builds, due to job lag (period is lag)", "Tunnels", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildExploratoryExpire", "How often an exploratory tunnel times out during creation", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.buildClientExpire", "How often a client tunnel times out during creation", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.buildExploratorySuccess", "Response time for success", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.buildClientSuccess", "Response time for success", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.buildExploratoryReject", "Response time for rejection", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.buildClientReject", "Response time for rejection", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.buildExploratoryExpire", "How often an exploratory tunnel times out during creation", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildClientExpire", "How often a client tunnel times out during creation", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildExploratorySuccess", "Response time for success", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildClientSuccess", "Response time for success", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildExploratoryReject", "Response time for rejection", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildClientReject", "Response time for rejection", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("tunnel.buildRequestTime", "How long it takes to build a tunnel request", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.buildRequestZeroHopTime", "How long it takes to build a zero hop tunnel", "Tunnels", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("tunnel.pendingRemaining", "How many inbound requests are pending after a pass (period is how long the pass takes)?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
@ -176,6 +176,7 @@ public abstract class TunnelPeerSelector {

Set peers = new HashSet(1);
peers.addAll(ctx.profileOrganizer().selectPeersRecentlyRejecting());
peers.addAll(ctx.tunnelManager().selectPeersInTooManyTunnels());
// if (false && filterUnreachable(ctx, isInbound, isExploratory)) {
if (filterUnreachable(ctx, isInbound, isExploratory)) {
List caps = ctx.peerManager().getPeersByCapability(Router.CAPABILITY_UNREACHABLE);
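This hunk feeds one more source into the peer-selection exclusion set (peers already carrying too large a share of our tunnels, via selectPeersInTooManyTunnels(), added further down) and removes the `false &&` guard that had kept the unreachable-peer filter dead code. A sketch of the resulting flow; only the calls quoted in the hunk are from the commit, the final addAll is an assumption about what the surrounding code does with the capability list:

    Set peers = new HashSet(1);
    peers.addAll(ctx.profileOrganizer().selectPeersRecentlyRejecting());
    peers.addAll(ctx.tunnelManager().selectPeersInTooManyTunnels()); // new: cap per-peer share
    if (filterUnreachable(ctx, isInbound, isExploratory)) {          // previously disabled via false &&
        List caps = ctx.peerManager().getPeersByCapability(Router.CAPABILITY_UNREACHABLE);
        if (caps != null)
            peers.addAll(caps); // assumption: unreachable peers join the exclusion set
    }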
@ -28,7 +28,7 @@ public class TunnelPool {
private RouterContext _context;
private Log _log;
private TunnelPoolSettings _settings;
private ArrayList _tunnels;
private ArrayList<TunnelInfo> _tunnels;
private TunnelPeerSelector _peerSelector;
private TunnelPoolManager _manager;
private boolean _alive;
@ -227,7 +227,7 @@ public class TunnelPool {
*
* @return list of TunnelInfo objects
*/
public List listTunnels() {
public List<TunnelInfo> listTunnels() {
synchronized (_tunnels) {
return new ArrayList(_tunnels);
}
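With the return type now parameterized, callers iterate the list without the cast the raw List forced on them. A caller sketch, not part of the commit; TunnelPool, TunnelInfo, and Hash are the router's own types, and the hop-walking calls match countTunnelsPerPeer further down:

    // Sketch: iterating the now-generic list without an explicit cast.
    static void walkHops(TunnelPool pool) {
        for (TunnelInfo info : pool.listTunnels()) {   // no (TunnelInfo) cast needed
            for (int j = 0; j < info.getLength(); j++) {
                Hash peer = info.getPeer(j);           // per-hop peer, as countTunnelsPerPeer does
            }
        }
    }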
@ -6,9 +6,12 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
@ -506,6 +509,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
out.write("</table>\n");
out.write("Inactive participating tunnels: " + inactive + "<br />\n");
out.write("Lifetime bandwidth usage: " + DataHelper.formatSize(processed*1024) + "B<br />\n");
renderPeers(out);
}

class TunnelComparator implements Comparator {
@ -579,6 +583,135 @@ public class TunnelPoolManager implements TunnelManagerFacade {
DataHelper.formatSize(processedOut*1024) + "B out<br />");
}

private void renderPeers(Writer out) throws IOException {
// count up the peers in the local pools
HashCounter lc = new HashCounter();
int tunnelCount = countTunnelsPerPeer(lc);

// count up the peers in the participating tunnels
HashCounter pc = new HashCounter();
int partCount = countParticipatingPerPeer(pc);

Set<Hash> peers = new HashSet(lc.hashes());
peers.addAll(pc.hashes());
List<Hash> peerList = new ArrayList(peers);
Collections.sort(peerList, new HashComparator());

out.write("<h2><a name=\"peers\">Tunnel Counts By Peer</a>:</h2>\n");
out.write("<table border=\"1\"><tr><td><b>Peer</b></td><td><b>Expl. + Client</b></td><td><b>% of total</b></td><td><b>Part. from + to</b></td><td><b>% of total</b></td></tr>\n");
for (Hash h : peerList) {
out.write("<tr><td>");
out.write(netDbLink(h));
out.write("<td align=\"right\">" + lc.count(h));
out.write("<td align=\"right\">");
if (tunnelCount > 0)
out.write("" + (lc.count(h) * 100 / tunnelCount));
else
out.write('0');
out.write("<td align=\"right\">" + pc.count(h));
out.write("<td align=\"right\">");
if (partCount > 0)
out.write("" + (pc.count(h) * 100 / partCount));
else
out.write('0');
out.write('\n');
}
out.write("<tr><td>Tunnels<td align=\"right\">" + tunnelCount);
out.write("<td>&nbsp;<td align=\"right\">" + partCount);
out.write("<td>&nbsp;</table>\n");
}
/** @return total number of non-fallback expl. + client tunnels */
private int countTunnelsPerPeer(HashCounter lc) {
List<TunnelPool> pools = new ArrayList();
listPools(pools);
int tunnelCount = 0;
for (TunnelPool tp : pools) {
for (TunnelInfo info : tp.listTunnels()) {
if (info.getLength() > 1) {
tunnelCount++;
for (int j = 0; j < info.getLength(); j++) {
Hash peer = info.getPeer(j);
if (!_context.routerHash().equals(peer))
lc.increment(peer);
}
}
}
}
return tunnelCount;
}

private static final int DEFAULT_MAX_PCT_TUNNELS = 33;
/**
* For reliability reasons, don't allow a peer in more than x% of
* client and exploratory tunnels.
*
* This also will prevent a single huge-capacity (or malicious) peer from
* taking all the tunnels in the network (although it would be nice to limit
* the % of total network tunnels to 10% or so, but that appears to be
* too low to set as a default here... much lower than 33% will push client
* tunnels out of the fast tier into high cap or beyond...)
*
* Possible improvement - restrict based on count per IP, or IP block,
* to slightly increase costs of collusion
*
* @return Set of peers that should not be allowed in another tunnel
*/
public Set<Hash> selectPeersInTooManyTunnels() {
HashCounter lc = new HashCounter();
int tunnelCount = countTunnelsPerPeer(lc);
Set<Hash> rv = new HashSet();
if (tunnelCount >= 4 && _context.router().getUptime() > 10*60*1000) {
int max = _context.getProperty("router.maxTunnelPercentage", DEFAULT_MAX_PCT_TUNNELS);
for (Hash h : lc.hashes()) {
if (lc.count(h) > 0 && (lc.count(h) + 1) * 100 / (tunnelCount + 1) > max)
rv.add(h);
}
}
return rv;
}
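A worked pass through the threshold check above, with the default 33% cap: given 9 counted tunnels and a peer present in 4 of them, (4 + 1) * 100 / (9 + 1) = 50 > 33, so the peer is excluded; the +1 on both sides keeps the test conservative at small tunnel counts. The values below are illustrative:

    int max = 33;          // DEFAULT_MAX_PCT_TUNNELS, overridable via router.maxTunnelPercentage
    int tunnelCount = 9;   // non-fallback exploratory + client tunnels
    int peerCount = 4;     // tunnels this peer appears in
    boolean tooMany = peerCount > 0
                   && (peerCount + 1) * 100 / (tunnelCount + 1) > max; // 500 / 10 = 50 > 33 -> true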
/** @return total number of part. tunnels */
private int countParticipatingPerPeer(HashCounter pc) {
List<HopConfig> participating = _context.tunnelDispatcher().listParticipatingTunnels();
for (HopConfig cfg : participating) {
Hash from = cfg.getReceiveFrom();
if (from != null)
pc.increment(from);
Hash to = cfg.getSendTo();
if (to != null)
pc.increment(to);
}
return participating.size();
}

class HashComparator implements Comparator {
public int compare(Object l, Object r) {
return ((Hash)l).toBase64().compareTo(((Hash)r).toBase64());
}
}
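HashComparator predates the generics used elsewhere in this commit; an equivalent parameterized form, shown here as a sketch rather than part of the commit, drops both casts:

    class HashComparator implements Comparator<Hash> {
        public int compare(Hash l, Hash r) {
            return l.toBase64().compareTo(r.toBase64());
        }
    }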
private static class HashCounter {
private ConcurrentHashMap<Hash, Integer> _map;
public HashCounter() {
_map = new ConcurrentHashMap();
}
public void increment(Hash h) {
Integer i = _map.putIfAbsent(h, Integer.valueOf(1));
if (i != null)
_map.put(h, Integer.valueOf(i.intValue() + 1));
}
public int count(Hash h) {
Integer i = _map.get(h);
if (i != null)
return i.intValue();
return 0;
}
public Set<Hash> hashes() {
return _map.keySet();
}
}
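HashCounter is a small concurrent tally over Hash keys. A usage sketch (h1 and h2 stand for peer hashes obtained elsewhere); note that increment() is a putIfAbsent followed by a plain put, so two racing increments of the same key can lose an update, which is harmless for these statistics pages:

    HashCounter hc = new HashCounter();
    hc.increment(h1);
    hc.increment(h1);
    hc.increment(h2);
    int n = hc.count(h1);        // n == 2
    for (Hash h : hc.hashes()) { // iterate every counted peer
        // render or aggregate per-peer totals here
    }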
private String getCapacity(Hash peer) {
RouterInfo info = _context.netDb().lookupRouterInfoLocally(peer);
if (info != null) {