2005-02-16 jrandom
* (Merged the 0.5-pre branch back into CVS HEAD) * Replaced the old tunnel routing crypto with the one specified in router/doc/tunnel-alt.html, including updates to the web console to view and tweak it. * Provide the means for routers to reject tunnel requests with a wider range of responses: probabilistic rejection, due to approaching overload; transient rejection, due to temporary overload; bandwidth rejection, due to persistent bandwidth overload; critical rejection, due to general router fault (or imminent shutdown). The different responses are factored into the profiles accordingly. * Replaced the old I2CP tunnel related options (tunnels.depthInbound, etc) with a series of new properties, relevant to the new tunnel routing code: inbound.nickname (used on the console); inbound.quantity (# of tunnels to use in any leaseSets); inbound.backupQuantity (# of tunnels to keep at the ready); inbound.length (# of remote peers in the tunnel); inbound.lengthVariance (if > 0, permute the length by adding a random # up to the variance; if < 0, permute the length by adding or subtracting a random # up to the variance); outbound.* (same as the inbound, except for the, uh, outbound tunnels in that client's pool). There are other options, and more will be added later, but the above are the most relevant ones. * Replaced Jetty 4.2.21 with Jetty 5.1.2 * Compress all profile data on disk. * Adjust the reseeding functionality to work even when the JVM's http proxy is set. * Enable a poor-man's interactive-flow in the streaming lib by choking the max window size. * Reduced the default streaming lib max message size to 16KB (though still configurable by the user), also doubling the default maximum window size. * Replaced the RouterIdentity in a Lease with its SHA256 hash. * Reduced the overall I2NP message checksum from a full 32 byte SHA256 to the first byte of the SHA256. * Added a new "netId" flag to let routers drop references to other routers who we won't be able to talk to. 
* Extended the timestamper to get a second (or third) opinion whenever it wants to actually adjust the clock offset. * Replaced that kludge of a timestamp I2NP message with a full blown DateMessage. * Substantial memory optimizations within the router and the SDK to reduce GC churn. Client apps and the streaming libs have not been tuned, however. * More bugfixes than you can shake a stick at. 2005-02-13 jrandom * Updated jbigi source to handle 64bit CPUs. The bundled jbigi.jar still only contains 32bit versions, so build your own, placing libjbigi.so in your install dir if necessary. (thanks mule!) * Added support for libjbigi-$os-athlon64 to NativeBigInteger and CPUID (thanks spaetz!)
This commit is contained in:
@ -39,12 +39,13 @@
|
||||
<pathelement location="../../jetty/jettylib/jasper-compiler.jar" />
|
||||
<pathelement location="../../jetty/jettylib/jasper-runtime.jar" />
|
||||
<pathelement location="../../jetty/jettylib/javax.servlet.jar" />
|
||||
<pathelement location="../../jetty/jettylib/commons-logging.jar" />
|
||||
<pathelement location="../../jetty/jettylib/commons-el.jar" />
|
||||
<pathelement location="../../jetty/jettylib/ant.jar" />
|
||||
<pathelement location="build/i2ptunnel.jar" />
|
||||
</classpath>
|
||||
<arg value="-d" />
|
||||
<arg value="../jsp/WEB-INF/classes" />
|
||||
<arg value="-v9" />
|
||||
<arg value="-p" />
|
||||
<arg value="net.i2p.i2ptunnel.jsp" />
|
||||
<arg value="-webinc" />
|
||||
@ -52,10 +53,12 @@
|
||||
<arg value="-webapp" />
|
||||
<arg value="../jsp/" />
|
||||
</java>
|
||||
<javac destdir="../jsp/WEB-INF/classes/" srcdir="../jsp/WEB-INF/classes" includes="*.java">
|
||||
<javac destdir="../jsp/WEB-INF/classes/" srcdir="../jsp/WEB-INF/classes" includes="**/*.java">
|
||||
<classpath>
|
||||
<pathelement location="../../jetty/jettylib/jasper-runtime.jar" />
|
||||
<pathelement location="../../jetty/jettylib/javax.servlet.jar" />
|
||||
<pathelement location="../../jetty/jettylib/commons-logging.jar" />
|
||||
<pathelement location="../../jetty/jettylib/commons-el.jar" />
|
||||
<pathelement location="build/i2ptunnel.jar" />
|
||||
</classpath>
|
||||
</javac>
|
||||
|
@ -183,7 +183,8 @@ public class I2PTunnel implements Logging, EventDispatcher {
|
||||
void addSession(I2PSession session) {
|
||||
if (session == null) return;
|
||||
synchronized (_sessions) {
|
||||
_sessions.add(session);
|
||||
if (!_sessions.contains(session))
|
||||
_sessions.add(session);
|
||||
}
|
||||
}
|
||||
void removeSession(I2PSession session) {
|
||||
|
@ -102,6 +102,7 @@ public class TunnelController implements Logging {
|
||||
|
||||
public void startTunnelBackground() {
|
||||
if (_running) return;
|
||||
_starting = true;
|
||||
new I2PThread(new Runnable() { public void run() { startTunnel(); } }).start();
|
||||
}
|
||||
|
||||
|
@ -180,7 +180,7 @@ public class TunnelControllerGroup {
|
||||
List msgs = new ArrayList();
|
||||
for (int i = 0; i < _controllers.size(); i++) {
|
||||
TunnelController controller = (TunnelController)_controllers.get(i);
|
||||
controller.startTunnel();
|
||||
controller.startTunnelBackground();
|
||||
msgs.addAll(controller.clearMessages());
|
||||
}
|
||||
|
||||
|
@ -186,6 +186,9 @@ class WebEditPageFormGenerator {
|
||||
buf.append("<form action=\"edit.jsp\">");
|
||||
if (id != null)
|
||||
buf.append("<input type=\"hidden\" name=\"num\" value=\"").append(id).append("\" />");
|
||||
long nonce = new Random().nextLong();
|
||||
System.setProperty(WebEditPageHelper.class.getName() + ".nonce", nonce+"");
|
||||
buf.append("<input type=\"hidden\" name=\"nonce\" value=\"").append(nonce).append("\" />");
|
||||
|
||||
buf.append("<b>Name:</b> <input type=\"text\" name=\"name\" size=\"20\" ");
|
||||
if ( (controller != null) && (controller.getName() != null) )
|
||||
@ -253,9 +256,10 @@ class WebEditPageFormGenerator {
|
||||
int tunnelDepth = 2;
|
||||
int numTunnels = 2;
|
||||
int connectDelay = 0;
|
||||
int maxWindowSize = -1;
|
||||
Properties opts = getOptions(controller);
|
||||
if (opts != null) {
|
||||
String depth = opts.getProperty("tunnels.depthInbound");
|
||||
String depth = opts.getProperty("inbound.length");
|
||||
if (depth != null) {
|
||||
try {
|
||||
tunnelDepth = Integer.parseInt(depth);
|
||||
@ -263,7 +267,7 @@ class WebEditPageFormGenerator {
|
||||
tunnelDepth = 2;
|
||||
}
|
||||
}
|
||||
String num = opts.getProperty("tunnels.numInbound");
|
||||
String num = opts.getProperty("inbound.quantity");
|
||||
if (num != null) {
|
||||
try {
|
||||
numTunnels = Integer.parseInt(num);
|
||||
@ -279,6 +283,14 @@ class WebEditPageFormGenerator {
|
||||
connectDelay = 0;
|
||||
}
|
||||
}
|
||||
String max = opts.getProperty("i2p.streaming.maxWindowSize");
|
||||
if (max != null) {
|
||||
try {
|
||||
maxWindowSize = Integer.parseInt(max);
|
||||
} catch (NumberFormatException nfe) {
|
||||
maxWindowSize = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buf.append("<b>Tunnel depth:</b> ");
|
||||
@ -325,6 +337,14 @@ class WebEditPageFormGenerator {
|
||||
buf.append("checked=\"true\" ");
|
||||
buf.append("/> (useful for brief request/response connections)<br />\n");
|
||||
|
||||
buf.append("<b>Communication profile:</b>");
|
||||
buf.append("<select name=\"profile\">");
|
||||
if (maxWindowSize <= 0)
|
||||
buf.append("<option value=\"interactive\">Interactive</option><option value=\"bulk\" selected=\"true\">Bulk</option>");
|
||||
else
|
||||
buf.append("<option value=\"interactive\" selected=\"true\">Interactive</option><option value=\"bulk\">Bulk</option>");
|
||||
buf.append("</select><br />\n");
|
||||
|
||||
buf.append("<b>I2CP host:</b> ");
|
||||
buf.append("<input type=\"text\" name=\"clientHost\" size=\"20\" value=\"");
|
||||
if ( (controller != null) && (controller.getI2CPHost() != null) )
|
||||
@ -347,9 +367,14 @@ class WebEditPageFormGenerator {
|
||||
for (Iterator iter = opts.keySet().iterator(); iter.hasNext(); ) {
|
||||
String key = (String)iter.next();
|
||||
String val = opts.getProperty(key);
|
||||
if ("tunnels.depthInbound".equals(key)) continue;
|
||||
if ("tunnels.numInbound".equals(key)) continue;
|
||||
if ("inbound.length".equals(key)) continue;
|
||||
if ("outbound.length".equals(key)) continue;
|
||||
if ("inbound.quantity".equals(key)) continue;
|
||||
if ("outbound.quantity".equals(key)) continue;
|
||||
if ("inbound.nickname".equals(key)) continue;
|
||||
if ("outbound.nickname".equals(key)) continue;
|
||||
if ("i2p.streaming.connectDelay".equals(key)) continue;
|
||||
if ("i2p.streaming.maxWindowSize".equals(key)) continue;
|
||||
if (i != 0) buf.append(' ');
|
||||
buf.append(key).append('=').append(val);
|
||||
i++;
|
||||
|
@ -40,9 +40,11 @@ public class WebEditPageHelper {
|
||||
private String _targetPort;
|
||||
private String _spoofedHost;
|
||||
private String _privKeyFile;
|
||||
private String _profile;
|
||||
private boolean _startOnLoad;
|
||||
private boolean _privKeyGenerate;
|
||||
private boolean _removeConfirmed;
|
||||
private long _nonce;
|
||||
|
||||
public WebEditPageHelper() {
|
||||
_action = null;
|
||||
@ -52,6 +54,14 @@ public class WebEditPageHelper {
|
||||
_log = I2PAppContext.getGlobalContext().logManager().getLog(WebEditPageHelper.class);
|
||||
}
|
||||
|
||||
public void setNonce(String nonce) {
|
||||
if (nonce != null) {
|
||||
try {
|
||||
_nonce = Long.parseLong(nonce);
|
||||
} catch (NumberFormatException nfe) {}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Used for form submit - either "Save" or Remove"
|
||||
*/
|
||||
@ -173,6 +183,9 @@ public class WebEditPageHelper {
|
||||
public void setConnectDelay(String moo) {
|
||||
_connectDelay = true;
|
||||
}
|
||||
public void setProfile(String profile) {
|
||||
_profile = profile;
|
||||
}
|
||||
|
||||
/**
|
||||
* Process the form and display any resulting messages
|
||||
@ -224,6 +237,9 @@ public class WebEditPageHelper {
|
||||
private String processAction() {
|
||||
if ( (_action == null) || (_action.trim().length() <= 0) )
|
||||
return "";
|
||||
String expected = System.getProperty(getClass().getName() + ".nonce");
|
||||
if ( (expected == null) || (!expected.equals(Long.toString(_nonce))) )
|
||||
return "<b>Invalid nonce, are you being spoofed?</b>";
|
||||
if ("Save".equals(_action))
|
||||
return save();
|
||||
else if ("Remove".equals(_action))
|
||||
@ -272,14 +288,26 @@ public class WebEditPageHelper {
|
||||
if (c == cur) continue;
|
||||
if ("httpclient".equals(c.getType()) || "client".equals(c.getType())) {
|
||||
Properties cOpt = c.getConfig("");
|
||||
if (_tunnelCount != null)
|
||||
cOpt.setProperty("option.tunnels.numInbound", _tunnelCount);
|
||||
if (_tunnelDepth != null)
|
||||
cOpt.setProperty("option.tunnels.depthInbound", _tunnelDepth);
|
||||
if (_tunnelCount != null) {
|
||||
cOpt.setProperty("option.inbound.quantity", _tunnelCount);
|
||||
cOpt.setProperty("option.outbound.quantity", _tunnelCount);
|
||||
}
|
||||
if (_tunnelDepth != null) {
|
||||
cOpt.setProperty("option.inbound.length", _tunnelDepth);
|
||||
cOpt.setProperty("option.outbound.length", _tunnelDepth);
|
||||
}
|
||||
if (_connectDelay)
|
||||
cOpt.setProperty("option.i2p.streaming.connectDelay", "1000");
|
||||
else
|
||||
cOpt.setProperty("option.i2p.streaming.connectDelay", "0");
|
||||
if ("interactive".equals(_profile))
|
||||
cOpt.setProperty("option.i2p.streaming.maxWindowSize", "1");
|
||||
else
|
||||
cOpt.remove("option.i2p.streaming.maxWindowSize");
|
||||
if (_name != null) {
|
||||
cOpt.setProperty("option.inbound.nickname", _name);
|
||||
cOpt.setProperty("option.outbound.nickname", _name);
|
||||
}
|
||||
c.setConfig(cOpt, "");
|
||||
}
|
||||
}
|
||||
@ -339,7 +367,6 @@ public class WebEditPageHelper {
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
|
||||
return config;
|
||||
}
|
||||
|
||||
@ -363,23 +390,40 @@ public class WebEditPageHelper {
|
||||
continue;
|
||||
String key = pair.substring(0, eq);
|
||||
String val = pair.substring(eq+1);
|
||||
if ("tunnels.numInbound".equals(key)) continue;
|
||||
if ("tunnels.depthInbound".equals(key)) continue;
|
||||
if ("inbound.length".equals(key)) continue;
|
||||
if ("outbound.length".equals(key)) continue;
|
||||
if ("inbound.quantity".equals(key)) continue;
|
||||
if ("outbound.quantity".equals(key)) continue;
|
||||
if ("inbound.nickname".equals(key)) continue;
|
||||
if ("outbound.nickname".equals(key)) continue;
|
||||
if ("i2p.streaming.connectDelay".equals(key)) continue;
|
||||
if ("i2p.streaming.maxWindowSize".equals(key)) continue;
|
||||
config.setProperty("option." + key, val);
|
||||
}
|
||||
}
|
||||
|
||||
config.setProperty("startOnLoad", _startOnLoad + "");
|
||||
|
||||
if (_tunnelCount != null)
|
||||
config.setProperty("option.tunnels.numInbound", _tunnelCount);
|
||||
if (_tunnelDepth != null)
|
||||
config.setProperty("option.tunnels.depthInbound", _tunnelDepth);
|
||||
if (_tunnelCount != null) {
|
||||
config.setProperty("option.inbound.quantity", _tunnelCount);
|
||||
config.setProperty("option.outbound.quantity", _tunnelCount);
|
||||
}
|
||||
if (_tunnelDepth != null) {
|
||||
config.setProperty("option.inbound.length", _tunnelDepth);
|
||||
config.setProperty("option.outbound.length", _tunnelDepth);
|
||||
}
|
||||
if (_connectDelay)
|
||||
config.setProperty("option.i2p.streaming.connectDelay", "1000");
|
||||
else
|
||||
config.setProperty("option.i2p.streaming.connectDelay", "0");
|
||||
if (_name != null) {
|
||||
config.setProperty("option.inbound.nickname", _name);
|
||||
config.setProperty("option.outbound.nickname", _name);
|
||||
}
|
||||
if ("interactive".equals(_profile))
|
||||
config.setProperty("option.i2p.streaming.maxWindowSize", "1");
|
||||
else
|
||||
config.remove("option.i2p.streaming.maxWindowSize");
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -14,14 +14,17 @@ import net.i2p.util.Log;
|
||||
*
|
||||
*/
|
||||
public class WebStatusPageHelper {
|
||||
private I2PAppContext _context;
|
||||
private Log _log;
|
||||
private String _action;
|
||||
private int _controllerNum;
|
||||
private long _nonce;
|
||||
|
||||
public WebStatusPageHelper() {
|
||||
_context = I2PAppContext.getGlobalContext();
|
||||
_action = null;
|
||||
_controllerNum = -1;
|
||||
_log = I2PAppContext.getGlobalContext().logManager().getLog(WebStatusPageHelper.class);
|
||||
_log = _context.logManager().getLog(WebStatusPageHelper.class);
|
||||
}
|
||||
|
||||
public void setAction(String action) {
|
||||
@ -36,6 +39,14 @@ public class WebStatusPageHelper {
|
||||
}
|
||||
}
|
||||
}
|
||||
public void setNonce(long nonce) { _nonce = nonce; }
|
||||
public void setNonce(String nonce) {
|
||||
if (nonce != null) {
|
||||
try {
|
||||
_nonce = Long.parseLong(nonce);
|
||||
} catch (NumberFormatException nfe) {}
|
||||
}
|
||||
}
|
||||
|
||||
public String getActionResults() {
|
||||
try {
|
||||
@ -51,28 +62,44 @@ public class WebStatusPageHelper {
|
||||
if (group == null)
|
||||
return "<b>I2PTunnel instances not yet started - please be patient</b>\n";
|
||||
|
||||
long nonce = _context.random().nextLong();
|
||||
StringBuffer buf = new StringBuffer(4*1024);
|
||||
buf.append("<ul>");
|
||||
List tunnels = group.getControllers();
|
||||
for (int i = 0; i < tunnels.size(); i++) {
|
||||
buf.append("<li>\n");
|
||||
getSummary(buf, i, (TunnelController)tunnels.get(i));
|
||||
getSummary(buf, i, (TunnelController)tunnels.get(i), nonce);
|
||||
buf.append("</li>\n");
|
||||
}
|
||||
buf.append("</ul>");
|
||||
|
||||
buf.append("<hr /><form action=\"index.jsp\" method=\"GET\">\n");
|
||||
buf.append("<input type=\"hidden\" name=\"nonce\" value=\"").append(nonce).append("\" />\n");
|
||||
buf.append("<input type=\"submit\" name=\"action\" value=\"Stop all\" />\n");
|
||||
buf.append("<input type=\"submit\" name=\"action\" value=\"Start all\" />\n");
|
||||
buf.append("<input type=\"submit\" name=\"action\" value=\"Restart all\" />\n");
|
||||
buf.append("<input type=\"submit\" name=\"action\" value=\"Reload config\" />\n");
|
||||
buf.append("</form>\n");
|
||||
|
||||
System.setProperty(getClass().getName() + ".nonce", nonce+"");
|
||||
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
private void getSummary(StringBuffer buf, int num, TunnelController controller) {
|
||||
private void getSummary(StringBuffer buf, int num, TunnelController controller, long nonce) {
|
||||
buf.append("<b>").append(controller.getName()).append("</b>: ");
|
||||
if (controller.getIsRunning()) {
|
||||
buf.append("<i>running</i> ");
|
||||
buf.append("<a href=\"index.jsp?num=").append(num).append("&action=stop\">stop</a> ");
|
||||
buf.append("<a href=\"index.jsp?num=").append(num);
|
||||
buf.append("&nonce=").append(nonce);
|
||||
buf.append("&action=stop\">stop</a> ");
|
||||
} else if (controller.getIsStarting()) {
|
||||
buf.append("<i>startup in progress (please be patient)</i>");
|
||||
} else {
|
||||
buf.append("<i>not running</i> ");
|
||||
buf.append("<a href=\"index.jsp?num=").append(num).append("&action=start\">start</a> ");
|
||||
buf.append("<a href=\"index.jsp?num=").append(num);
|
||||
buf.append("&nonce=").append(nonce);
|
||||
buf.append("&action=start\">start</a> ");
|
||||
}
|
||||
buf.append("<a href=\"edit.jsp?num=").append(num).append("\">edit</a> ");
|
||||
buf.append("<br />\n");
|
||||
@ -82,6 +109,9 @@ public class WebStatusPageHelper {
|
||||
private String processAction() {
|
||||
if ( (_action == null) || (_action.trim().length() <= 0) )
|
||||
return getMessages();
|
||||
String expected = System.getProperty(getClass().getName() + ".nonce");
|
||||
if ( (expected == null) || (!expected.equals(Long.toString(_nonce))) )
|
||||
return "<b>Invalid nonce, are you being spoofed?</b>";
|
||||
if ("Stop all".equals(_action))
|
||||
return stopAll();
|
||||
else if ("Start all".equals(_action))
|
||||
@ -139,7 +169,7 @@ public class WebStatusPageHelper {
|
||||
List controllers = group.getControllers();
|
||||
if (_controllerNum >= controllers.size()) return "Invalid tunnel";
|
||||
TunnelController controller = (TunnelController)controllers.get(_controllerNum);
|
||||
controller.startTunnel();
|
||||
controller.startTunnelBackground();
|
||||
return getMessages(controller.clearMessages());
|
||||
}
|
||||
|
||||
|
@ -11,13 +11,6 @@
|
||||
<b><jsp:getProperty name="helper" property="actionResults" /></b>
|
||||
|
||||
<jsp:getProperty name="helper" property="summaryList" />
|
||||
<hr />
|
||||
<form action="index.jsp" method="GET">
|
||||
<input type="submit" name="action" value="Stop all" />
|
||||
<input type="submit" name="action" value="Start all" />
|
||||
<input type="submit" name="action" value="Restart all" />
|
||||
<input type="submit" name="action" value="Reload config" />
|
||||
</form>
|
||||
|
||||
<form action="edit.jsp">
|
||||
<b>Add new:</b>
|
||||
|
@ -3,18 +3,38 @@
|
||||
|
||||
<target name="all" depends="build" />
|
||||
<target name="fetchJettylib" >
|
||||
<available property="jetty.available" file="jettylib" />
|
||||
<available property="jetty.available" file="jetty-5.1.2.zip" />
|
||||
<ant target="doFetchJettylib" />
|
||||
</target>
|
||||
<target name="doFetchJettylib" unless="jetty.available" >
|
||||
<echo message="The libraries contained within the fetched file are from Jetty's 4.2.21 " />
|
||||
<echo message="distribution (http://jetty.mortbay.org/) which we have copied to our website since" />
|
||||
<echo message="theirs doesn't have direct HTTP access to the libs. These are not " />
|
||||
<echo message="The libraries contained within the fetched file are from Jetty's 5.1.2" />
|
||||
<echo message="distribution (http://jetty.mortbay.org/). These are not " />
|
||||
<echo message="necessary for using I2P, but are used by some applications on top of I2P," />
|
||||
<echo message="such as the routerconsole." />
|
||||
<get src="http://dev.i2p.net/jettylib.tar.bz2" verbose="true" dest="jettylib.tar.bz2" />
|
||||
<untar src="jettylib.tar.bz2" compression="bzip2" dest="." />
|
||||
<delete file="jettylib.tar.bz2" />
|
||||
<get src="http://mesh.dl.sourceforge.net/sourceforge/jetty/jetty-5.1.2.zip" verbose="true" dest="jetty-5.1.2.zip" />
|
||||
<ant target="doExtract" />
|
||||
</target>
|
||||
<target name="doExtract">
|
||||
<unzip src="jetty-5.1.2.zip" dest="." />
|
||||
<mkdir dir="jettylib" />
|
||||
<copy todir="jettylib">
|
||||
<fileset dir="jetty-5.1.2/lib">
|
||||
<include name="*.jar" />
|
||||
</fileset>
|
||||
</copy>
|
||||
<copy todir="jettylib">
|
||||
<fileset dir="jetty-5.1.2/ext">
|
||||
<include name="ant.jar" />
|
||||
<include name="commons-el.jar" />
|
||||
<include name="commons-logging.jar" />
|
||||
<include name="jasper-compiler.jar" />
|
||||
<include name="jasper-runtime.jar" />
|
||||
<include name="javax.servlet.jar" />
|
||||
<include name="org.mortbay.jetty.jar" />
|
||||
<include name="xercesImpl.jar" />
|
||||
</fileset>
|
||||
</copy>
|
||||
<delete dir="jetty-5.1.2" />
|
||||
</target>
|
||||
<target name="build" depends="fetchJettylib" />
|
||||
<target name="builddep" />
|
||||
|
@ -115,6 +115,7 @@ public class StreamSinkServer {
|
||||
}
|
||||
public void run() {
|
||||
if (_fos == null) return;
|
||||
long start = System.currentTimeMillis();
|
||||
try {
|
||||
InputStream in = _sock.getInputStream();
|
||||
byte buf[] = new byte[4096];
|
||||
@ -126,7 +127,8 @@ public class StreamSinkServer {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("read and wrote " + read);
|
||||
}
|
||||
_log.error("Got EOF from client socket [written=" + written + "]");
|
||||
long lifetime = System.currentTimeMillis() - start;
|
||||
_log.error("Got EOF from client socket [written=" + written + " lifetime=" + lifetime + "]");
|
||||
} catch (IOException ioe) {
|
||||
_log.error("Error writing the sink", ioe);
|
||||
} finally {
|
||||
|
@ -20,7 +20,7 @@
|
||||
<classpath>
|
||||
<pathelement location="../../../core/java/build/i2p.jar" />
|
||||
<pathelement location="../../../router/java/build/router.jar" />
|
||||
<pathelement location="../../jetty/jettylib/org.mortbay.jetty-jdk1.2.jar" />
|
||||
<pathelement location="../../jetty/jettylib/org.mortbay.jetty.jar" />
|
||||
<pathelement location="../../jetty/jettylib/javax.servlet.jar" />
|
||||
<pathelement location="../../systray/java/build/systray.jar" />
|
||||
<pathelement location="../../systray/java/lib/systray4j.jar" />
|
||||
@ -53,17 +53,19 @@
|
||||
<pathelement location="../../jetty/jettylib/jasper-compiler.jar" />
|
||||
<pathelement location="../../jetty/jettylib/jasper-runtime.jar" />
|
||||
<pathelement location="../../jetty/jettylib/javax.servlet.jar" />
|
||||
<pathelement location="../../jetty/jettylib/commons-logging.jar" />
|
||||
<pathelement location="../../jetty/jettylib/commons-el.jar" />
|
||||
<pathelement location="../../jetty/jettylib/ant.jar" />
|
||||
<pathelement location="../../systray/java/build/obj" />
|
||||
<pathelement location="../../systray/java/lib/systray4j.jar" /> <!-- some javacs resolve recursively... -->
|
||||
<pathelement location="../../../installer/lib/wrapper/win32/wrapper.jar" /> <!-- we dont care if we're not on win32 -->
|
||||
<pathelement location="../../../core/java/build/i2p.jar" />
|
||||
<pathelement location="../../../router/java/build/router.jar" />
|
||||
<pathelement location="../../systray/java/lib/systray4j.jar" />
|
||||
<pathelement location="../../../installer/lib/wrapper/win32/wrapper.jar" />
|
||||
<pathelement location="build/routerconsole.jar" />
|
||||
<pathelement location="../../../router/java/build/router.jar" />
|
||||
<pathelement location="../../../core/java/build/i2p.jar" />
|
||||
</classpath>
|
||||
<arg value="-d" />
|
||||
<arg value="../jsp/WEB-INF/classes" />
|
||||
<arg value="-v9" />
|
||||
<arg value="-v" />
|
||||
<arg value="-p" />
|
||||
<arg value="net.i2p.router.web.jsp" />
|
||||
<arg value="-webinc" />
|
||||
@ -71,10 +73,13 @@
|
||||
<arg value="-webapp" />
|
||||
<arg value="../jsp/" />
|
||||
</java>
|
||||
<javac destdir="../jsp/WEB-INF/classes/" srcdir="../jsp/WEB-INF/classes" includes="*.java">
|
||||
|
||||
<javac destdir="../jsp/WEB-INF/classes/" srcdir="../jsp/WEB-INF/classes" includes="**/*.java">
|
||||
<classpath>
|
||||
<pathelement location="../../jetty/jettylib/jasper-runtime.jar" />
|
||||
<pathelement location="../../jetty/jettylib/javax.servlet.jar" />
|
||||
<pathelement location="../../jetty/jettylib/commons-logging.jar" />
|
||||
<pathelement location="../../jetty/jettylib/commons-el.jar" />
|
||||
<pathelement location="build/routerconsole.jar" />
|
||||
</classpath>
|
||||
</javac>
|
||||
|
@ -39,6 +39,7 @@ public class ConfigNetHandler extends FormHandler {
|
||||
private String _outboundRate;
|
||||
private String _outboundBurst;
|
||||
private String _reseedFrom;
|
||||
private String _sharePct;
|
||||
|
||||
public void ConfigNetHandler() {
|
||||
_guessRequested = false;
|
||||
@ -85,6 +86,9 @@ public class ConfigNetHandler extends FormHandler {
|
||||
public void setReseedfrom(String url) {
|
||||
_reseedFrom = (url != null ? url.trim() : null);
|
||||
}
|
||||
public void setSharePercentage(String pct) {
|
||||
_sharePct = (pct != null ? pct.trim() : null);
|
||||
}
|
||||
|
||||
private static final String IP_PREFIX = "<h1>Your IP is ";
|
||||
private static final String IP_SUFFIX = " <br></h1>";
|
||||
@ -229,6 +233,14 @@ public class ConfigNetHandler extends FormHandler {
|
||||
|
||||
updateRates();
|
||||
|
||||
if (_sharePct != null) {
|
||||
String old = _context.router().getConfigSetting(ConfigNetHelper.PROP_SHARE_PERCENTAGE);
|
||||
if ( (old == null) || (!old.equalsIgnoreCase(_sharePct)) ) {
|
||||
_context.router().setConfigSetting(ConfigNetHelper.PROP_SHARE_PERCENTAGE, _sharePct);
|
||||
addFormNotice("Updating bandwidth share percentage");
|
||||
}
|
||||
}
|
||||
|
||||
if (_timeSyncEnabled) {
|
||||
// Time sync enable, means NOT disabled
|
||||
_context.router().setConfigSetting(Timestamper.PROP_DISABLED, "false");
|
||||
|
@ -62,6 +62,8 @@ public class ConfigNetHelper {
|
||||
public static final String PROP_OUTBOUND_KBPS = "i2np.bandwidth.outboundKBytesPerSecond";
|
||||
public static final String PROP_INBOUND_BURST = "i2np.bandwidth.inboundBurstKBytes";
|
||||
public static final String PROP_OUTBOUND_BURST = "i2np.bandwidth.outboundBurstKBytes";
|
||||
public static final String PROP_SHARE_PERCENTAGE = "router.sharePercentage";
|
||||
public static final int DEFAULT_SHARE_PERCENTAGE = 80;
|
||||
|
||||
public String getInboundRate() {
|
||||
String rate = _context.getProperty(PROP_INBOUND_KBPS);
|
||||
@ -135,4 +137,26 @@ public class ConfigNetHelper {
|
||||
buf.append("</select>\n");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
public String getSharePercentageBox() {
|
||||
String pctStr = _context.getProperty(PROP_SHARE_PERCENTAGE);
|
||||
int pct = DEFAULT_SHARE_PERCENTAGE;
|
||||
if (pctStr != null)
|
||||
try { pct = Integer.parseInt(pctStr); } catch (NumberFormatException nfe) {}
|
||||
StringBuffer buf = new StringBuffer(256);
|
||||
buf.append("<select name=\"sharePercentage\">\n");
|
||||
boolean found = false;
|
||||
for (int i = 30; i <= 100; i += 10) {
|
||||
buf.append("<option value=\"").append(i).append("\" ");
|
||||
if (pct == i) {
|
||||
buf.append("selected=\"true\" ");
|
||||
found = true;
|
||||
} else if ( (i == DEFAULT_SHARE_PERCENTAGE) && (!found) ) {
|
||||
buf.append("selected=\"true\" ");
|
||||
}
|
||||
buf.append(">Up to ").append(i).append("%</option>\n");
|
||||
}
|
||||
buf.append("</select>\n");
|
||||
return buf.toString();
|
||||
}
|
||||
}
|
||||
|
@ -4,7 +4,9 @@ import java.io.ByteArrayOutputStream;
|
||||
import java.io.File;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
|
||||
import java.net.Socket;
|
||||
import java.net.URL;
|
||||
import java.net.URLConnection;
|
||||
import java.util.HashSet;
|
||||
@ -103,8 +105,17 @@ public class ReseedHandler {
|
||||
|
||||
private static byte[] readURL(URL url) throws Exception {
|
||||
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
|
||||
URLConnection con = url.openConnection();
|
||||
InputStream in = con.getInputStream();
|
||||
String hostname = url.getHost();
|
||||
int port = url.getPort();
|
||||
if (port < 0)
|
||||
port = 80;
|
||||
Socket s = new Socket(hostname, port);
|
||||
OutputStream out = s.getOutputStream();
|
||||
InputStream in = s.getInputStream();
|
||||
String request = getRequest(url);
|
||||
System.out.println("Sending to " + hostname +":"+ port + ": " + request);
|
||||
out.write(request.getBytes());
|
||||
out.flush();
|
||||
byte buf[] = new byte[1024];
|
||||
while (true) {
|
||||
int read = in.read(buf);
|
||||
@ -113,9 +124,24 @@ public class ReseedHandler {
|
||||
baos.write(buf, 0, read);
|
||||
}
|
||||
in.close();
|
||||
s.close();
|
||||
return baos.toByteArray();
|
||||
}
|
||||
|
||||
private static String getRequest(URL url) {
|
||||
StringBuffer buf = new StringBuffer(512);
|
||||
String path = url.getPath();
|
||||
if ("".equals(path))
|
||||
path = "/";
|
||||
buf.append("GET ").append(path).append(" HTTP/1.0\n");
|
||||
buf.append("Host: ").append(url.getHost());
|
||||
int port = url.getPort();
|
||||
if ( (port > 0) && (port != 80) )
|
||||
buf.append(":").append(port);
|
||||
buf.append("\nConnection: close\n\n");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
private static void writeSeed(String name, byte data[]) throws Exception {
|
||||
String dirName = "netDb"; // _context.getProperty("router.networkDatabase.dbDir", "netDb");
|
||||
File netDbDir = new File(dirName);
|
||||
@ -126,4 +152,9 @@ public class ReseedHandler {
|
||||
fos.write(data);
|
||||
fos.close();
|
||||
}
|
||||
|
||||
public static void main(String args[]) {
|
||||
reseed();
|
||||
System.out.println("Done reseeding");
|
||||
}
|
||||
}
|
||||
|
@ -15,6 +15,7 @@ import org.mortbay.http.handler.SecurityHandler;
|
||||
import org.mortbay.http.HashUserRealm;
|
||||
import org.mortbay.http.HttpRequest;
|
||||
import org.mortbay.http.SecurityConstraint;
|
||||
import org.mortbay.http.Authenticator;
|
||||
import org.mortbay.util.MultiException;
|
||||
|
||||
public class RouterConsoleRunner {
|
||||
@ -64,7 +65,7 @@ public class RouterConsoleRunner {
|
||||
}
|
||||
try {
|
||||
_server.start();
|
||||
} catch (MultiException me) {
|
||||
} catch (Exception me) {
|
||||
me.printStackTrace();
|
||||
}
|
||||
try {
|
||||
|
@ -4,13 +4,18 @@ import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStreamWriter;
|
||||
import java.text.DecimalFormat;
|
||||
import java.util.Iterator;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.LeaseSet;
|
||||
import net.i2p.stat.Rate;
|
||||
import net.i2p.stat.RateStat;
|
||||
import net.i2p.router.Router;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.router.RouterVersion;
|
||||
import net.i2p.router.TunnelPoolSettings;
|
||||
|
||||
/**
|
||||
* Simple helper to query the appropriate router for data necessary to render
|
||||
@ -333,16 +338,39 @@ public class SummaryHelper {
|
||||
* @return html section summary
|
||||
*/
|
||||
public String getDestinations() {
|
||||
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
|
||||
try {
|
||||
OutputStreamWriter osw = new OutputStreamWriter(baos);
|
||||
_context.clientManager().renderStatusHTML(osw);
|
||||
osw.flush();
|
||||
return new String(baos.toByteArray());
|
||||
} catch (IOException ioe) {
|
||||
_context.logManager().getLog(SummaryHelper.class).error("Error rendering client info", ioe);
|
||||
return "";
|
||||
Set clients = _context.clientManager().listClients();
|
||||
|
||||
StringBuffer buf = new StringBuffer(512);
|
||||
buf.append("<u><b>Local destinations</b></u><br />");
|
||||
|
||||
for (Iterator iter = clients.iterator(); iter.hasNext(); ) {
|
||||
Destination client = (Destination)iter.next();
|
||||
TunnelPoolSettings in = _context.tunnelManager().getInboundSettings(client.calculateHash());
|
||||
TunnelPoolSettings out = _context.tunnelManager().getOutboundSettings(client.calculateHash());
|
||||
String name = (in != null ? in.getDestinationNickname() : null);
|
||||
if (name == null)
|
||||
name = (out != null ? out.getDestinationNickname() : null);
|
||||
if (name == null)
|
||||
name = client.calculateHash().toBase64().substring(0,6);
|
||||
|
||||
buf.append("<b>*</b> ").append(name).append("<br />\n");
|
||||
LeaseSet ls = _context.netDb().lookupLeaseSetLocally(client.calculateHash());
|
||||
if (ls != null) {
|
||||
long timeToExpire = ls.getEarliestLeaseDate() - _context.clock().now();
|
||||
if (timeToExpire < 0) {
|
||||
buf.append("<i>expired ").append(DataHelper.formatDuration(0-timeToExpire));
|
||||
buf.append(" ago</i><br />\n");
|
||||
}
|
||||
} else {
|
||||
buf.append("<i>No leases</i><br />\n");
|
||||
}
|
||||
buf.append("<a href=\"tunnels.jsp#").append(client.calculateHash().toBase64().substring(0,4));
|
||||
buf.append("\">Details</a> ");
|
||||
buf.append("<a href=\"configtunnels.jsp#").append(client.calculateHash().toBase64().substring(0,4));
|
||||
buf.append("\">Config</a><br />\n");
|
||||
}
|
||||
buf.append("<hr />\n");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -39,14 +39,17 @@
|
||||
|
||||
<b>Bandwidth limiter</b><br />
|
||||
Inbound rate:
|
||||
<input name="inboundrate" type="text" size="2" value="<jsp:getProperty name="nethelper" property="inboundRate" />" /> KBytes per second<br />
|
||||
Inbound burst duration:
|
||||
<input name="inboundrate" type="text" size="2" value="<jsp:getProperty name="nethelper" property="inboundRate" />" /> KBytes per second
|
||||
bursting up to
|
||||
<jsp:getProperty name="nethelper" property="inboundBurstFactorBox" /><br />
|
||||
Outbound rate:
|
||||
<input name="outboundrate" type="text" size="2" value="<jsp:getProperty name="nethelper" property="outboundRate" />" /> KBytes per second<br />
|
||||
Outbound burst duration:
|
||||
<input name="outboundrate" type="text" size="2" value="<jsp:getProperty name="nethelper" property="outboundRate" />" /> KBytes per second
|
||||
bursting up to
|
||||
<jsp:getProperty name="nethelper" property="outboundBurstFactorBox" /><br />
|
||||
<i>A negative rate means there is no limit</i><br />
|
||||
Bandwidth share percentage:
|
||||
<jsp:getProperty name="nethelper" property="sharePercentageBox" /><br />
|
||||
Sharing a higher percentage will improve your anonymity and help the network
|
||||
<hr />
|
||||
Enable internal time synchronization? <input type="checkbox" <jsp:getProperty name="nethelper" property="enableTimeSyncChecked" /> name="enabletimesync" /><br />
|
||||
<i>If disabled, your machine <b>must</b> be NTP synchronized - your clock must always
|
||||
@ -61,17 +64,7 @@
|
||||
<hr />
|
||||
<b>Advanced network config:</b>
|
||||
<p>
|
||||
There are two other network settings, but no one reads this text so there's no reason
|
||||
to tell you about them. In case you actually do read this, here's the deal: by default,
|
||||
I2P will attempt to guess your IP address by having whomever it talks to tell it what
|
||||
address they think you are. If and only if you have no working TCP connections and you
|
||||
have not overridden the IP address, your router will believe them. If that doesn't sound
|
||||
ok to you, thats fine - go to the <a href="configadvanced.jsp">advanced config</a> page
|
||||
and add "i2np.tcp.hostname=yourHostname", then go to the
|
||||
<a href="configservice.jsp">service</a> page and do a graceful restart. We used to make
|
||||
people enter a hostname/IP address on this page, but too many people got it wrong ;)</p>
|
||||
|
||||
<p>The other advanced network option has to do with reseeding - you should never need to
|
||||
One advanced network option has to do with reseeding - you should never need to
|
||||
reseed your router as long as you can find at least one other peer on the network. However,
|
||||
when you do need to reseed, a link will show up on the left hand side which will
|
||||
fetch all of the routerInfo-* files from http://dev.i2p.net/i2pdb/. That URL is just an
|
||||
|
@ -2,8 +2,8 @@
|
||||
%>Network | <% } else { %><a href="config.jsp">Network</a> | <% }
|
||||
if (request.getRequestURI().indexOf("configservice.jsp") != -1) {
|
||||
%>Service | <% } else { %><a href="configservice.jsp">Service</a> | <% }
|
||||
if (request.getRequestURI().indexOf("configclients.jsp") != -1) {
|
||||
%>Clients | <% } else { %><a href="configclients.jsp">Clients</a> | <% }
|
||||
if (request.getRequestURI().indexOf("configtunnels.jsp") != -1) {
|
||||
%>Tunnels | <% } else { %><a href="configtunnels.jsp">Tunnels</a> | <% }
|
||||
if (request.getRequestURI().indexOf("configlogging.jsp") != -1) {
|
||||
%>Logging | <% } else { %><a href="configlogging.jsp">Logging</a> | <% }
|
||||
if (request.getRequestURI().indexOf("configadvanced.jsp") != -1) {
|
||||
|
@ -15,6 +15,7 @@
|
||||
</div>
|
||||
|
||||
<h4>
|
||||
<a href="tunnels.jsp">Tunnels</a> |
|
||||
<a href="profiles.jsp">Profiles</a> |
|
||||
<a href="netdb.jsp">Network Database</a> |
|
||||
<a href="logs.jsp">Logs</a> |
|
||||
|
@ -12,6 +12,7 @@ import java.util.TreeMap;
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.client.I2PSession;
|
||||
import net.i2p.data.Base64;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.SessionTag;
|
||||
import net.i2p.util.Log;
|
||||
@ -80,8 +81,8 @@ public class Connection {
|
||||
/** wait up to 5 minutes after disconnection so we can ack/close packets */
|
||||
public static int DISCONNECT_TIMEOUT = 5*60*1000;
|
||||
|
||||
/** lets be sane- no more than 32 packets in the air in each dir */
|
||||
public static final int MAX_WINDOW_SIZE = 32;
|
||||
/** lets be sane- no more than 64 packets in the air in each dir */
|
||||
public static final int MAX_WINDOW_SIZE = 64;
|
||||
|
||||
public Connection(I2PAppContext ctx, ConnectionManager manager, SchedulerChooser chooser, PacketQueue queue, ConnectionPacketHandler handler) {
|
||||
this(ctx, manager, chooser, queue, handler, null);
|
||||
@ -106,7 +107,7 @@ public class Connection {
|
||||
_unackedPacketsReceived = 0;
|
||||
_congestionWindowEnd = 0;
|
||||
_highestAckedThrough = -1;
|
||||
_lastCongestionSeenAt = MAX_WINDOW_SIZE;
|
||||
_lastCongestionSeenAt = MAX_WINDOW_SIZE*2; // lets allow it to grow
|
||||
_lastCongestionTime = -1;
|
||||
_lastCongestionHighestUnacked = -1;
|
||||
_connectionManager = manager;
|
||||
@ -767,6 +768,21 @@ public class Connection {
|
||||
buf.append(" ").append(nacks[i]);
|
||||
buf.append("]");
|
||||
}
|
||||
|
||||
if (getResetSent())
|
||||
buf.append(" reset sent");
|
||||
if (getResetReceived())
|
||||
buf.append(" reset received");
|
||||
if (getCloseSentOn() > 0) {
|
||||
buf.append(" close sent ");
|
||||
long timeSinceClose = _context.clock().now() - getCloseSentOn();
|
||||
buf.append(DataHelper.formatDuration(timeSinceClose));
|
||||
buf.append(" ago");
|
||||
}
|
||||
if (getCloseReceivedOn() > 0)
|
||||
buf.append(" close received");
|
||||
buf.append(" acked packets ").append(getAckedPackets());
|
||||
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
|
@ -21,6 +21,7 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
|
||||
private int _inactivityTimeout;
|
||||
private int _inactivityAction;
|
||||
private int _inboundBufferSize;
|
||||
private int _maxWindowSize;
|
||||
|
||||
public static final int PROFILE_BULK = 1;
|
||||
public static final int PROFILE_INTERACTIVE = 2;
|
||||
@ -43,6 +44,7 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
|
||||
public static final String PROP_INITIAL_RECEIVE_WINDOW = "i2p.streaming.initialReceiveWindow";
|
||||
public static final String PROP_INACTIVITY_TIMEOUT = "i2p.streaming.inactivityTimeout";
|
||||
public static final String PROP_INACTIVITY_ACTION = "i2p.streaming.inactivityAction";
|
||||
public static final String PROP_MAX_WINDOW_SIZE = "i2p.streaming.maxWindowSize";
|
||||
|
||||
public ConnectionOptions() {
|
||||
super();
|
||||
@ -71,6 +73,7 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
|
||||
setInactivityTimeout(opts.getInactivityTimeout());
|
||||
setInactivityAction(opts.getInactivityAction());
|
||||
setInboundBufferSize(opts.getInboundBufferSize());
|
||||
setMaxWindowSize(opts.getMaxWindowSize());
|
||||
}
|
||||
}
|
||||
|
||||
@ -78,11 +81,11 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
|
||||
super.init(opts);
|
||||
setConnectDelay(getInt(opts, PROP_CONNECT_DELAY, -1));
|
||||
setProfile(getInt(opts, PROP_PROFILE, PROFILE_BULK));
|
||||
setMaxMessageSize(getInt(opts, PROP_MAX_MESSAGE_SIZE, Packet.MAX_PAYLOAD_SIZE));
|
||||
setMaxMessageSize(getInt(opts, PROP_MAX_MESSAGE_SIZE, 16*1024));
|
||||
setRTT(getInt(opts, PROP_INITIAL_RTT, 30*1000));
|
||||
setReceiveWindow(getInt(opts, PROP_INITIAL_RECEIVE_WINDOW, 1));
|
||||
setResendDelay(getInt(opts, PROP_INITIAL_RESEND_DELAY, 500));
|
||||
setSendAckDelay(getInt(opts, PROP_INITIAL_ACK_DELAY, 500));
|
||||
setResendDelay(getInt(opts, PROP_INITIAL_RESEND_DELAY, 1000));
|
||||
setSendAckDelay(getInt(opts, PROP_INITIAL_ACK_DELAY, 1000));
|
||||
setWindowSize(getInt(opts, PROP_INITIAL_WINDOW_SIZE, 1));
|
||||
setMaxResends(getInt(opts, PROP_MAX_RESENDS, 5));
|
||||
setWriteTimeout(getInt(opts, PROP_WRITE_TIMEOUT, -1));
|
||||
@ -91,6 +94,7 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
|
||||
setInboundBufferSize((getMaxMessageSize() + 2) * Connection.MAX_WINDOW_SIZE);
|
||||
|
||||
setConnectTimeout(getInt(opts, PROP_CONNECT_TIMEOUT, Connection.DISCONNECT_TIMEOUT));
|
||||
setMaxWindowSize(getInt(opts, PROP_MAX_WINDOW_SIZE, Connection.MAX_WINDOW_SIZE));
|
||||
}
|
||||
|
||||
public void setProperties(Properties opts) {
|
||||
@ -124,6 +128,8 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
|
||||
|
||||
if (opts.containsKey(PROP_CONNECT_TIMEOUT))
|
||||
setConnectTimeout(getInt(opts, PROP_CONNECT_TIMEOUT, Connection.DISCONNECT_TIMEOUT));
|
||||
if (opts.containsKey(PROP_MAX_WINDOW_SIZE))
|
||||
setMaxWindowSize(getInt(opts, PROP_MAX_WINDOW_SIZE, Connection.MAX_WINDOW_SIZE));
|
||||
}
|
||||
|
||||
/**
|
||||
@ -152,8 +158,8 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
|
||||
*/
|
||||
public int getWindowSize() { return _windowSize; }
|
||||
public void setWindowSize(int numMsgs) {
|
||||
if (numMsgs > Connection.MAX_WINDOW_SIZE)
|
||||
numMsgs = Connection.MAX_WINDOW_SIZE;
|
||||
if (numMsgs > _maxWindowSize)
|
||||
numMsgs = _maxWindowSize;
|
||||
_windowSize = numMsgs;
|
||||
}
|
||||
|
||||
@ -232,6 +238,16 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
|
||||
public int getInactivityAction() { return _inactivityAction; }
|
||||
public void setInactivityAction(int action) { _inactivityAction = action; }
|
||||
|
||||
public int getMaxWindowSize() { return _maxWindowSize; }
|
||||
public void setMaxWindowSize(int msgs) {
|
||||
if (msgs > Connection.MAX_WINDOW_SIZE)
|
||||
_maxWindowSize = Connection.MAX_WINDOW_SIZE;
|
||||
else if (msgs < 1)
|
||||
_maxWindowSize = 1;
|
||||
else
|
||||
_maxWindowSize = msgs;
|
||||
}
|
||||
|
||||
/**
|
||||
* how much data are we willing to accept in our buffer?
|
||||
*
|
||||
@ -252,6 +268,7 @@ public class ConnectionOptions extends I2PSocketOptionsImpl {
|
||||
buf.append(" writeTimeout=").append(getWriteTimeout());
|
||||
buf.append(" inactivityTimeout=").append(_inactivityTimeout);
|
||||
buf.append(" inboundBuffer=").append(_inboundBufferSize);
|
||||
buf.append(" maxWindowSize=").append(_maxWindowSize);
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
|
@ -87,8 +87,8 @@ public class PacketHandler {
|
||||
}
|
||||
|
||||
private void receivePacketDirect(Packet packet) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("packet received: " + packet);
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("packet received: " + packet);
|
||||
|
||||
byte sendId[] = packet.getSendStreamId();
|
||||
if (!isNonZero(sendId))
|
||||
@ -118,8 +118,8 @@ public class PacketHandler {
|
||||
// the packet is pointed at a stream ID we're receiving on
|
||||
if (isValidMatch(con.getSendStreamId(), packet.getReceiveStreamId())) {
|
||||
// the packet's receive stream ID also matches what we expect
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("receive valid: " + packet);
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("receive valid: " + packet);
|
||||
try {
|
||||
con.getPacketHandler().receivePacket(packet, con);
|
||||
} catch (I2PException ie) {
|
||||
|
@ -36,7 +36,7 @@ class SchedulerClosed extends SchedulerImpl {
|
||||
long timeSinceClose = _context.clock().now() - con.getCloseSentOn();
|
||||
boolean ok = (con.getCloseSentOn() > 0) &&
|
||||
(con.getCloseReceivedOn() > 0) &&
|
||||
(con.getUnackedPacketsReceived() <= 0) &&
|
||||
//(con.getUnackedPacketsReceived() <= 0) &&
|
||||
(con.getUnackedPacketsSent() <= 0) &&
|
||||
(!con.getResetReceived()) &&
|
||||
(timeSinceClose < Connection.DISCONNECT_TIMEOUT);
|
||||
|
@ -34,9 +34,12 @@ class SchedulerClosing extends SchedulerImpl {
|
||||
}
|
||||
|
||||
public boolean accept(Connection con) {
|
||||
long timeSinceClose = _context.clock().now() - con.getCloseSentOn();
|
||||
boolean ok = (con != null) &&
|
||||
(con.getCloseSentOn() > 0) &&
|
||||
(con.getCloseReceivedOn() > 0) &&
|
||||
(!con.getResetSent()) &&
|
||||
(!con.getResetReceived()) &&
|
||||
( (con.getCloseSentOn() > 0) || (con.getCloseReceivedOn() > 0) ) &&
|
||||
(timeSinceClose < Connection.DISCONNECT_TIMEOUT) &&
|
||||
( (con.getUnackedPacketsReceived() > 0) || (con.getUnackedPacketsSent() > 0) );
|
||||
return ok;
|
||||
}
|
||||
|
24
build.xml
24
build.xml
@ -34,13 +34,12 @@
|
||||
<copy file="apps/routerconsole/java/build/routerconsole.jar" todir="build/" />
|
||||
<copy file="apps/routerconsole/java/build/routerconsole.war" todir="build/" />
|
||||
<copy file="apps/jetty/jettylib/org.mortbay.jetty.jar" todir="build/" />
|
||||
<copy file="apps/jetty/jettylib/org.mortbay.jetty-jdk1.2.jar" todir="build/" />
|
||||
<copy file="apps/jetty/jettylib/ant.jar" todir="build/" />
|
||||
<copy file="apps/jetty/jettylib/jasper-compiler.jar" todir="build/" />
|
||||
<copy file="apps/jetty/jettylib/jasper-runtime.jar" todir="build/" />
|
||||
<copy file="apps/jetty/jettylib/jnet.jar" todir="build/" />
|
||||
<copy file="apps/jetty/jettylib/commons-logging.jar" todir="build/" />
|
||||
<copy file="apps/jetty/jettylib/commons-el.jar" todir="build/" />
|
||||
<copy file="apps/jetty/jettylib/xercesImpl.jar" todir="build/" />
|
||||
<copy file="apps/jetty/jettylib/xml-apis.jar" todir="build/" />
|
||||
<copy file="apps/jetty/jettylib/javax.servlet.jar" todir="build/" />
|
||||
</target>
|
||||
<target name="compile" />
|
||||
@ -161,13 +160,13 @@
|
||||
<copy file="build/i2ptunnel.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/jasper-compiler.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/jasper-runtime.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/commons-logging.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/commons-el.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/javax.servlet.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/jbigi.jar" todir="pkg-temp/lib" />
|
||||
<copy file="build/jnet.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/mstreaming.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/streaming.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/netmonitor.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/org.mortbay.jetty-jdk1.2.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/org.mortbay.jetty.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/router.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/routerconsole.jar" todir="pkg-temp/lib/" />
|
||||
@ -178,7 +177,6 @@
|
||||
<copy file="apps/systray/java/resources/iggy.ico" todir="pkg-temp/icons" />
|
||||
<copy file="apps/systray/java/resources/iggy.xpm" todir="pkg-temp/icons" />
|
||||
<copy file="build/xercesImpl.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/xml-apis.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/i2ptunnel.war" todir="pkg-temp/webapps/" />
|
||||
<copy file="build/routerconsole.war" todir="pkg-temp/webapps/" />
|
||||
<copy file="build/addressbook.war" todir="pkg-temp/webapps/" />
|
||||
@ -227,6 +225,7 @@
|
||||
<mkdir dir="pkg-temp/eepsite/webapps" />
|
||||
<mkdir dir="pkg-temp/eepsite/logs" />
|
||||
<mkdir dir="pkg-temp/eepsite/docroot" />
|
||||
<mkdir dir="pkg-temp/eepsite/cgi-bin" />
|
||||
<copy file="installer/resources/eepsite_index.html" tofile="pkg-temp/eepsite/docroot/index.html" />
|
||||
<copy file="installer/resources/favicon.ico" tofile="pkg-temp/eepsite/docroot/favicon.ico" />
|
||||
<copy file="installer/resources/jetty.xml" tofile="pkg-temp/eepsite/jetty.xml" />
|
||||
@ -245,6 +244,18 @@
|
||||
<copy file="build/sam.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/router.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/routerconsole.jar" todir="pkg-temp/lib/" />
|
||||
|
||||
<!-- for the i2p 0.5 release, push jetty 5.2.1 -->
|
||||
<copy file="build/jasper-compiler.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/jasper-runtime.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/commons-logging.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/commons-el.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/org.mortbay.jetty.jar" todir="pkg-temp/lib/" />
|
||||
<copy file="build/javax.servlet.jar" todir="pkg-temp/lib/" />
|
||||
<!-- requires commons-* to be added to the classpath (boo, hiss) -->
|
||||
<copy file="installer/resources/wrapper.config" todir="pkg-temp/" />
|
||||
<touch file="pkg-temp/wrapper.config.updated" />
|
||||
|
||||
<copy file="build/i2ptunnel.war" todir="pkg-temp/webapps/" />
|
||||
<copy file="build/routerconsole.war" todir="pkg-temp/webapps/" />
|
||||
<copy file="build/addressbook.war" todir="pkg-temp/webapps/" />
|
||||
@ -252,6 +263,7 @@
|
||||
<copy file="hosts.txt" todir="pkg-temp/" />
|
||||
<mkdir dir="pkg-temp/eepsite" />
|
||||
<mkdir dir="pkg-temp/eepsite/webapps" />
|
||||
<mkdir dir="pkg-temp/eepsite/cgi-bin" />
|
||||
<zip destfile="i2pupdate.zip" basedir="pkg-temp" />
|
||||
</target>
|
||||
<taskdef name="izpack" classpath="${basedir}/installer/lib/izpack/standalone-compiler.jar" classname="com.izforge.izpack.ant.IzPackTask" />
|
||||
|
@ -143,7 +143,9 @@ void convert_j2mp(JNIEnv* env, jbyteArray jvalue, mpz_t* mvalue)
|
||||
|
||||
void convert_mp2j(JNIEnv* env, mpz_t mvalue, jbyteArray* jvalue)
|
||||
{
|
||||
jsize size;
|
||||
// size_t not jsize to work with 64bit CPUs (do we need to update this
|
||||
// elsewhere, and/or adjust memory alloc sizes?)
|
||||
size_t size;
|
||||
jbyte* buffer;
|
||||
jboolean copy;
|
||||
//int i;
|
||||
|
@ -252,6 +252,10 @@ public class CPUID {
|
||||
return "Athlon 64";
|
||||
case 5:
|
||||
return "Athlon 64 FX Opteron";
|
||||
case 12:
|
||||
return "Athlon 64";
|
||||
default: // is this safe?
|
||||
return "Athlon 64 (unknown)";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -14,8 +14,8 @@ package net.i2p;
|
||||
*
|
||||
*/
|
||||
public class CoreVersion {
|
||||
public final static String ID = "$Revision: 1.25 $ $Date: 2004/11/06 22:00:57 $";
|
||||
public final static String VERSION = "0.4.2";
|
||||
public final static String ID = "$Revision: 1.26.2.1 $ $Date: 2005/02/09 13:46:58 $";
|
||||
public final static String VERSION = "0.5-pre";
|
||||
|
||||
public static void main(String args[]) {
|
||||
System.out.println("I2P Core version: " + VERSION);
|
||||
|
@ -103,8 +103,9 @@ class I2CPMessageProducer {
|
||||
if (payload == null) throw new I2PSessionException("No payload specified");
|
||||
|
||||
Payload data = new Payload();
|
||||
// randomize padding
|
||||
int size = payload.length + RandomSource.getInstance().nextInt(1024);
|
||||
// no padding at this level
|
||||
// the garlic may pad, and the tunnels may pad, and the transports may pad
|
||||
int size = payload.length;
|
||||
byte encr[] = _context.elGamalAESEngine().encrypt(payload, dest.getPublicKey(), key, tags, tag, newKey, size);
|
||||
// yes, in an intelligent component, newTags would be queued for confirmation along with key, and
|
||||
// generateNewTags would only generate tags if necessary
|
||||
|
@ -111,6 +111,8 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
}
|
||||
}
|
||||
|
||||
public static final int LISTEN_PORT = 7654;
|
||||
|
||||
/**
|
||||
* Create a new session, reading the Destination, PrivateKey, and SigningPrivateKey
|
||||
* from the destKeyStream, and using the specified options to connect to the router
|
||||
@ -145,14 +147,14 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
_options = new Properties();
|
||||
_options.putAll(filter(options));
|
||||
_hostname = _options.getProperty(I2PClient.PROP_TCP_HOST, "localhost");
|
||||
String portNum = _options.getProperty(I2PClient.PROP_TCP_PORT, TestServer.LISTEN_PORT + "");
|
||||
String portNum = _options.getProperty(I2PClient.PROP_TCP_PORT, LISTEN_PORT + "");
|
||||
try {
|
||||
_portNum = Integer.parseInt(portNum);
|
||||
} catch (NumberFormatException nfe) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn(getPrefix() + "Invalid port number specified, defaulting to "
|
||||
+ TestServer.LISTEN_PORT, nfe);
|
||||
_portNum = TestServer.LISTEN_PORT;
|
||||
+ LISTEN_PORT, nfe);
|
||||
_portNum = LISTEN_PORT;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -46,7 +46,7 @@ class RequestLeaseSetMessageHandler extends HandlerImpl {
|
||||
LeaseSet leaseSet = new LeaseSet();
|
||||
for (int i = 0; i < msg.getEndpoints(); i++) {
|
||||
Lease lease = new Lease();
|
||||
lease.setRouterIdentity(msg.getRouter(i));
|
||||
lease.setGateway(msg.getRouter(i));
|
||||
lease.setTunnelId(msg.getTunnelId(i));
|
||||
lease.setEndDate(msg.getEndDate());
|
||||
//lease.setStartDate(msg.getStartDate());
|
||||
|
@ -602,14 +602,15 @@ class TransientSessionKeyManager extends SessionKeyManager {
|
||||
long rv = 0;
|
||||
if (_key != null) rv = rv * 7 + _key.hashCode();
|
||||
rv = rv * 7 + _date;
|
||||
if (_sessionTags != null) rv = rv * 7 + DataHelper.hashCode(_sessionTags);
|
||||
// no need to hashCode the tags, key + date should be enough
|
||||
return (int) rv;
|
||||
}
|
||||
|
||||
public boolean equals(Object o) {
|
||||
if ((o == null) || !(o instanceof TagSet)) return false;
|
||||
TagSet ts = (TagSet) o;
|
||||
return DataHelper.eq(ts.getAssociatedKey(), getAssociatedKey()) && DataHelper.eq(ts.getTags(), getTags())
|
||||
return DataHelper.eq(ts.getAssociatedKey(), getAssociatedKey())
|
||||
//&& DataHelper.eq(ts.getTags(), getTags())
|
||||
&& ts.getDate() == getDate();
|
||||
}
|
||||
}
|
||||
|
@ -19,6 +19,7 @@ import java.io.Serializable;
|
||||
public class ByteArray implements Serializable, Comparable {
|
||||
private byte[] _data;
|
||||
private int _valid;
|
||||
private int _offset;
|
||||
|
||||
public ByteArray() {
|
||||
this(null);
|
||||
@ -28,6 +29,11 @@ public class ByteArray implements Serializable, Comparable {
|
||||
_data = data;
|
||||
_valid = 0;
|
||||
}
|
||||
public ByteArray(byte[] data, int offset, int length) {
|
||||
_data = data;
|
||||
_offset = offset;
|
||||
_valid = length;
|
||||
}
|
||||
|
||||
public final byte[] getData() {
|
||||
return _data;
|
||||
@ -44,6 +50,8 @@ public class ByteArray implements Serializable, Comparable {
|
||||
*/
|
||||
public final int getValid() { return _valid; }
|
||||
public final void setValid(int valid) { _valid = valid; }
|
||||
public final int getOffset() { return _offset; }
|
||||
public final void setOffset(int offset) { _offset = offset; }
|
||||
|
||||
public final boolean equals(Object o) {
|
||||
if (o == null) return false;
|
||||
|
@ -35,6 +35,7 @@ import java.util.zip.GZIPInputStream;
|
||||
import java.util.zip.GZIPOutputStream;
|
||||
|
||||
import net.i2p.util.ByteCache;
|
||||
import net.i2p.util.CachingByteArrayOutputStream;
|
||||
import net.i2p.util.OrderedProperties;
|
||||
|
||||
/**
|
||||
@ -123,7 +124,70 @@ public class DataHelper {
|
||||
writeLong(rawStream, 2, 0);
|
||||
}
|
||||
}
|
||||
|
||||
public static int toProperties(byte target[], int offset, Properties props) throws DataFormatException, IOException {
|
||||
if (props != null) {
|
||||
OrderedProperties p = new OrderedProperties();
|
||||
p.putAll(props);
|
||||
ByteArrayOutputStream baos = new ByteArrayOutputStream(32);
|
||||
for (Iterator iter = p.keySet().iterator(); iter.hasNext();) {
|
||||
String key = (String) iter.next();
|
||||
String val = p.getProperty(key);
|
||||
// now make sure they're in UTF-8
|
||||
//key = new String(key.getBytes(), "UTF-8");
|
||||
//val = new String(val.getBytes(), "UTF-8");
|
||||
writeString(baos, key);
|
||||
baos.write(_equalBytes);
|
||||
writeString(baos, val);
|
||||
baos.write(_semicolonBytes);
|
||||
}
|
||||
baos.close();
|
||||
byte propBytes[] = baos.toByteArray();
|
||||
toLong(target, offset, 2, propBytes.length);
|
||||
offset += 2;
|
||||
System.arraycopy(propBytes, 0, target, offset, propBytes.length);
|
||||
offset += propBytes.length;
|
||||
return offset;
|
||||
} else {
|
||||
toLong(target, offset, 2, 0);
|
||||
return offset + 2;
|
||||
}
|
||||
}
|
||||
|
||||
public static int fromProperties(byte source[], int offset, Properties target) throws DataFormatException, IOException {
|
||||
int size = (int)fromLong(source, offset, 2);
|
||||
offset += 2;
|
||||
ByteArrayInputStream in = new ByteArrayInputStream(source, offset, size);
|
||||
byte eqBuf[] = new byte[_equalBytes.length];
|
||||
byte semiBuf[] = new byte[_semicolonBytes.length];
|
||||
while (in.available() > 0) {
|
||||
String key = readString(in);
|
||||
int read = read(in, eqBuf);
|
||||
if ((read != eqBuf.length) || (!eq(eqBuf, _equalBytes))) {
|
||||
break;
|
||||
}
|
||||
String val = readString(in);
|
||||
read = read(in, semiBuf);
|
||||
if ((read != semiBuf.length) || (!eq(semiBuf, _semicolonBytes))) {
|
||||
break;
|
||||
}
|
||||
target.put(key, val);
|
||||
}
|
||||
return offset + size;
|
||||
}
|
||||
|
||||
public static byte[] toProperties(Properties opts) {
|
||||
try {
|
||||
ByteArrayOutputStream baos = new ByteArrayOutputStream(2);
|
||||
writeProperties(baos, opts);
|
||||
return baos.toByteArray();
|
||||
} catch (DataFormatException dfe) {
|
||||
throw new RuntimeException("Format error writing to memory?! " + dfe.getMessage());
|
||||
} catch (IOException ioe) {
|
||||
throw new RuntimeException("IO error writing to memory?! " + ioe.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Pretty print the mapping
|
||||
*
|
||||
@ -147,9 +211,12 @@ public class DataHelper {
|
||||
*
|
||||
*/
|
||||
public static void loadProps(Properties props, File file) throws IOException {
|
||||
loadProps(props, new FileInputStream(file));
|
||||
}
|
||||
public static void loadProps(Properties props, InputStream inStr) throws IOException {
|
||||
BufferedReader in = null;
|
||||
try {
|
||||
in = new BufferedReader(new InputStreamReader(new FileInputStream(file)), 16*1024);
|
||||
in = new BufferedReader(new InputStreamReader(inStr), 16*1024);
|
||||
String line = null;
|
||||
while ( (line = in.readLine()) != null) {
|
||||
if (line.trim().length() <= 0) continue;
|
||||
@ -258,15 +325,32 @@ public class DataHelper {
|
||||
throws DataFormatException, IOException {
|
||||
if (numBytes > 8)
|
||||
throw new DataFormatException("readLong doesn't currently support reading numbers > 8 bytes [as thats bigger than java's long]");
|
||||
byte data[] = new byte[numBytes];
|
||||
int num = read(rawStream, data);
|
||||
if (num != numBytes)
|
||||
throw new DataFormatException("Not enough bytes [" + num + "] as required for the field [" + numBytes + "]");
|
||||
|
||||
UnsignedInteger val = new UnsignedInteger(data);
|
||||
return val.getLong();
|
||||
long rv = 0;
|
||||
for (int i = 0; i < numBytes; i++) {
|
||||
long cur = rawStream.read() & 0xFF;
|
||||
if (cur == -1) throw new DataFormatException("Not enough bytes for the field");
|
||||
// we loop until we find a nonzero byte (or we reach the end)
|
||||
if (cur != 0) {
|
||||
// ok, data found, now iterate through it to fill the rv
|
||||
long remaining = numBytes - i;
|
||||
for (int j = 0; j < remaining; j++) {
|
||||
long shiftAmount = 8 * (remaining-j-1);
|
||||
cur = cur << shiftAmount;
|
||||
rv += cur;
|
||||
if (j + 1 < remaining) {
|
||||
cur = rawStream.read() & 0xFF;
|
||||
if (cur == -1)
|
||||
throw new DataFormatException("Not enough bytes for the field");
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
||||
|
||||
/** Write an integer as defined by the I2P data structure specification to the stream.
|
||||
* Integers are a fixed number of bytes (numBytes), stored as unsigned integers in network byte order.
|
||||
* @param value value to write out
|
||||
@ -277,12 +361,10 @@ public class DataHelper {
|
||||
*/
|
||||
public static void writeLong(OutputStream rawStream, int numBytes, long value)
|
||||
throws DataFormatException, IOException {
|
||||
try {
|
||||
UnsignedInteger.writeBytes(rawStream, numBytes, value);
|
||||
//UnsignedInteger i = new UnsignedInteger(value);
|
||||
//rawStream.write(i.getBytes(numBytes));
|
||||
} catch (IllegalArgumentException iae) {
|
||||
throw new DataFormatException("Invalid value (must be positive)", iae);
|
||||
|
||||
for (int i = numBytes - 1; i >= 0; i--) {
|
||||
byte cur = (byte)( (value >>> (i*8) ) & 0xFF);
|
||||
rawStream.write(cur);
|
||||
}
|
||||
}
|
||||
|
||||
@ -322,7 +404,7 @@ public class DataHelper {
|
||||
for (long i = 0; i <= 0xFFFF; i++)
|
||||
testLong(2, i);
|
||||
System.out.println("Test 2byte passed");
|
||||
for (long i = 0; i <= 0xFFFFFF; i++)
|
||||
for (long i = 0; i <= 0xFFFFFF; i ++)
|
||||
testLong(3, i);
|
||||
System.out.println("Test 3byte passed");
|
||||
for (long i = 0; i <= 0xFFFFFFFF; i++)
|
||||
@ -344,6 +426,9 @@ public class DataHelper {
|
||||
long read = fromLong(extract, 0, extract.length);
|
||||
if (read != value)
|
||||
throw new RuntimeException("testLong("+numBytes+","+value+") FAILED on read (" + read + ")");
|
||||
read = readLong(new ByteArrayInputStream(written), numBytes);
|
||||
if (read != value)
|
||||
throw new RuntimeException("testLong("+numBytes+","+value+") FAILED on readLong (" + read + ")");
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e.getMessage());
|
||||
}
|
||||
@ -384,6 +469,9 @@ public class DataHelper {
|
||||
else
|
||||
return toLong(DATE_LENGTH, date.getTime());
|
||||
}
|
||||
public static void toDate(byte target[], int offset, long when) throws IllegalArgumentException {
|
||||
toLong(target, offset, DATE_LENGTH, when);
|
||||
}
|
||||
public static Date fromDate(byte src[], int offset) throws DataFormatException {
|
||||
if ( (src == null) || (offset + DATE_LENGTH > src.length) )
|
||||
throw new DataFormatException("Not enough data to read a date");
|
||||
@ -479,9 +567,29 @@ public class DataHelper {
|
||||
writeLong(out, 1, BOOLEAN_FALSE);
|
||||
}
|
||||
|
||||
public static Boolean fromBoolean(byte data[], int offset) {
|
||||
if (data[offset] == BOOLEAN_TRUE)
|
||||
return Boolean.TRUE;
|
||||
else if (data[offset] == BOOLEAN_FALSE)
|
||||
return Boolean.FALSE;
|
||||
else
|
||||
return null;
|
||||
}
|
||||
|
||||
public static void toBoolean(byte data[], int offset, boolean value) {
|
||||
data[offset] = (value ? BOOLEAN_TRUE : BOOLEAN_FALSE);
|
||||
}
|
||||
public static void toBoolean(byte data[], int offset, Boolean value) {
|
||||
if (value == null)
|
||||
data[offset] = BOOLEAN_UNKNOWN;
|
||||
else
|
||||
data[offset] = (value.booleanValue() ? BOOLEAN_TRUE : BOOLEAN_FALSE);
|
||||
}
|
||||
|
||||
public static final byte BOOLEAN_TRUE = 0x1;
|
||||
public static final byte BOOLEAN_FALSE = 0x0;
|
||||
public static final byte BOOLEAN_UNKNOWN = 0x2;
|
||||
public static final int BOOLEAN_LENGTH = 1;
|
||||
|
||||
//
|
||||
// The following comparator helpers make it simpler to write consistently comparing
|
||||
@ -762,12 +870,13 @@ public class DataHelper {
|
||||
public static byte[] compress(byte orig[], int offset, int size) {
|
||||
if ((orig == null) || (orig.length <= 0)) return orig;
|
||||
try {
|
||||
ByteArrayOutputStream baos = new ByteArrayOutputStream(size);
|
||||
CachingByteArrayOutputStream baos = new CachingByteArrayOutputStream(16, 40*1024);
|
||||
GZIPOutputStream out = new GZIPOutputStream(baos, size);
|
||||
out.write(orig, offset, size);
|
||||
out.finish();
|
||||
out.flush();
|
||||
byte rv[] = baos.toByteArray();
|
||||
baos.releaseBuffer();
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("Compression of " + orig.length + " into " + rv.length + " (or " + 100.0d
|
||||
// * (((double) orig.length) / ((double) rv.length)) + "% savings)");
|
||||
@ -785,7 +894,7 @@ public class DataHelper {
|
||||
public static byte[] decompress(byte orig[], int offset, int length) throws IOException {
|
||||
if ((orig == null) || (orig.length <= 0)) return orig;
|
||||
GZIPInputStream in = new GZIPInputStream(new ByteArrayInputStream(orig, offset, length), length);
|
||||
ByteArrayOutputStream baos = new ByteArrayOutputStream(length * 2);
|
||||
CachingByteArrayOutputStream baos = new CachingByteArrayOutputStream(16, 40*1024);
|
||||
ByteCache cache = ByteCache.getInstance(10, 4*1024);
|
||||
ByteArray ba = cache.acquire();
|
||||
byte buf[] = ba.getData(); // new byte[4 * 1024];
|
||||
@ -796,6 +905,7 @@ public class DataHelper {
|
||||
}
|
||||
byte rv[] = baos.toByteArray();
|
||||
cache.release(ba);
|
||||
baos.releaseBuffer();
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug("Decompression of " + orig.length + " into " + rv.length + " (or " + 100.0d
|
||||
// * (((double) rv.length) / ((double) orig.length)) + "% savings)");
|
||||
|
@ -25,14 +25,14 @@ import net.i2p.util.Log;
|
||||
*/
|
||||
public class Lease extends DataStructureImpl {
|
||||
private final static Log _log = new Log(Lease.class);
|
||||
private RouterIdentity _routerIdentity;
|
||||
private Hash _gateway;
|
||||
private TunnelId _tunnelId;
|
||||
private Date _end;
|
||||
private int _numSuccess;
|
||||
private int _numFailure;
|
||||
|
||||
public Lease() {
|
||||
setRouterIdentity(null);
|
||||
setGateway(null);
|
||||
setTunnelId(null);
|
||||
setEndDate(null);
|
||||
setNumSuccess(0);
|
||||
@ -42,15 +42,15 @@ public class Lease extends DataStructureImpl {
|
||||
/** Retrieve the router at which the destination can be contacted
|
||||
* @return identity of the router acting as a gateway
|
||||
*/
|
||||
public RouterIdentity getRouterIdentity() {
|
||||
return _routerIdentity;
|
||||
public Hash getGateway() {
|
||||
return _gateway;
|
||||
}
|
||||
|
||||
/** Configure the router at which the destination can be contacted
|
||||
* @param ident router acting as the gateway
|
||||
*/
|
||||
public void setRouterIdentity(RouterIdentity ident) {
|
||||
_routerIdentity = ident;
|
||||
public void setGateway(Hash ident) {
|
||||
_gateway = ident;
|
||||
}
|
||||
|
||||
/** Tunnel on the gateway to communicate with
|
||||
@ -113,18 +113,18 @@ public class Lease extends DataStructureImpl {
|
||||
}
|
||||
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
_routerIdentity = new RouterIdentity();
|
||||
_routerIdentity.readBytes(in);
|
||||
_gateway = new Hash();
|
||||
_gateway.readBytes(in);
|
||||
_tunnelId = new TunnelId();
|
||||
_tunnelId.readBytes(in);
|
||||
_end = DataHelper.readDate(in);
|
||||
}
|
||||
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
if ((_routerIdentity == null) || (_tunnelId == null))
|
||||
if ((_gateway == null) || (_tunnelId == null))
|
||||
throw new DataFormatException("Not enough data to write out a Lease");
|
||||
|
||||
_routerIdentity.writeBytes(out);
|
||||
_gateway.writeBytes(out);
|
||||
_tunnelId.writeBytes(out);
|
||||
DataHelper.writeDate(out, _end);
|
||||
}
|
||||
@ -133,12 +133,13 @@ public class Lease extends DataStructureImpl {
|
||||
if ((object == null) || !(object instanceof Lease)) return false;
|
||||
Lease lse = (Lease) object;
|
||||
return DataHelper.eq(getEndDate(), lse.getEndDate())
|
||||
&& DataHelper.eq(getRouterIdentity(), lse.getRouterIdentity());
|
||||
&& DataHelper.eq(getTunnelId(), lse.getTunnelId())
|
||||
&& DataHelper.eq(getGateway(), lse.getGateway());
|
||||
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getEndDate()) + DataHelper.hashCode(getRouterIdentity())
|
||||
return DataHelper.hashCode(getEndDate()) + DataHelper.hashCode(getGateway())
|
||||
+ DataHelper.hashCode(getTunnelId());
|
||||
}
|
||||
|
||||
@ -146,7 +147,7 @@ public class Lease extends DataStructureImpl {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append("[Lease: ");
|
||||
buf.append("\n\tEnd Date: ").append(getEndDate());
|
||||
buf.append("\n\tRouter Identity: ").append(getRouterIdentity());
|
||||
buf.append("\n\tGateway: ").append(getGateway());
|
||||
buf.append("\n\tTunnelId: ").append(getTunnelId());
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
|
@ -74,12 +74,18 @@ public class LeaseSet extends DataStructureImpl {
|
||||
}
|
||||
|
||||
public void addLease(Lease lease) {
|
||||
if (lease == null) throw new IllegalArgumentException("erm, null lease");
|
||||
if (lease.getGateway() == null) throw new IllegalArgumentException("erm, lease has no gateway");
|
||||
if (lease.getTunnelId() == null) throw new IllegalArgumentException("erm, lease has no tunnel");
|
||||
_leases.add(lease);
|
||||
}
|
||||
|
||||
public void removeLease(Lease lease) {
|
||||
_leases.remove(lease);
|
||||
}
|
||||
public void removeLease(int index) {
|
||||
_leases.remove(index);
|
||||
}
|
||||
|
||||
public int getLeaseCount() {
|
||||
return _leases.size();
|
||||
@ -208,16 +214,19 @@ public class LeaseSet extends DataStructureImpl {
|
||||
for (int i = 0; i < cnt; i++) {
|
||||
Lease l = getLease(i);
|
||||
if (l.getEndDate().getTime() > insane) {
|
||||
_log.warn("LeaseSet" + calculateHash() + " expires an insane amount in the future - skip it: " + l);
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("LeaseSet" + calculateHash() + " expires an insane amount in the future - skip it: " + l);
|
||||
return false;
|
||||
}
|
||||
// if it hasn't finished, we're current
|
||||
if (l.getEndDate().getTime() > now) {
|
||||
_log.debug("LeaseSet " + calculateHash() + " isn't exired: " + l);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("LeaseSet " + calculateHash() + " isn't exired: " + l);
|
||||
return true;
|
||||
} else if (l.getEndDate().getTime() > now - fudge) {
|
||||
_log.debug("LeaseSet " + calculateHash()
|
||||
+ " isn't quite expired, but its within the fudge factor so we'll let it slide: " + l);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("LeaseSet " + calculateHash()
|
||||
+ " isn't quite expired, but its within the fudge factor so we'll let it slide: " + l);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -225,7 +234,14 @@ public class LeaseSet extends DataStructureImpl {
|
||||
}
|
||||
|
||||
private byte[] getBytes() {
|
||||
ByteArrayOutputStream out = new ByteArrayOutputStream();
|
||||
int len = PublicKey.KEYSIZE_BYTES // dest
|
||||
+ SigningPublicKey.KEYSIZE_BYTES // dest
|
||||
+ 4 // cert
|
||||
+ PublicKey.KEYSIZE_BYTES // encryptionKey
|
||||
+ SigningPublicKey.KEYSIZE_BYTES // signingKey
|
||||
+ 1
|
||||
+ _leases.size() * 44; // leases
|
||||
ByteArrayOutputStream out = new ByteArrayOutputStream(len);
|
||||
try {
|
||||
if ((_destination == null) || (_encryptionKey == null) || (_signingKey == null) || (_leases == null))
|
||||
return null;
|
||||
@ -244,7 +260,8 @@ public class LeaseSet extends DataStructureImpl {
|
||||
} catch (DataFormatException dfe) {
|
||||
return null;
|
||||
}
|
||||
return out.toByteArray();
|
||||
byte rv[] = out.toByteArray();
|
||||
return rv;
|
||||
}
|
||||
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
|
@ -49,6 +49,8 @@ public class RouterInfo extends DataStructureImpl {
|
||||
private volatile int _hashCode;
|
||||
private volatile boolean _hashCodeInitialized;
|
||||
|
||||
public static final String PROP_NETWORK_ID = "netId";
|
||||
|
||||
public RouterInfo() {
|
||||
setIdentity(null);
|
||||
setPublished(0);
|
||||
@ -243,7 +245,7 @@ public class RouterInfo extends DataStructureImpl {
|
||||
if (_options == null) throw new DataFormatException("Router options isn't set? wtf!");
|
||||
|
||||
long before = Clock.getInstance().now();
|
||||
ByteArrayOutputStream out = new ByteArrayOutputStream();
|
||||
ByteArrayOutputStream out = new ByteArrayOutputStream(6*1024);
|
||||
try {
|
||||
_identity.writeBytes(out);
|
||||
DataHelper.writeDate(out, new Date(_published));
|
||||
@ -279,6 +281,24 @@ public class RouterInfo extends DataStructureImpl {
|
||||
return _isValid;
|
||||
}
|
||||
|
||||
/**
|
||||
* which network is this routerInfo a part of. configured through the property
|
||||
* PROP_NETWORK_ID
|
||||
*/
|
||||
public int getNetworkId() {
|
||||
if (_options == null) return -1;
|
||||
String id = null;
|
||||
synchronized (_options) {
|
||||
id = _options.getProperty(PROP_NETWORK_ID);
|
||||
}
|
||||
if (id != null) {
|
||||
try {
|
||||
return Integer.parseInt(id);
|
||||
} catch (NumberFormatException nfe) {}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the routing key for the structure using the current modifier in the RoutingKeyGenerator.
|
||||
* This only calculates a new one when necessary though (if the generator's key modifier changes)
|
||||
@ -422,19 +442,17 @@ public class RouterInfo extends DataStructureImpl {
|
||||
public boolean equals(Object object) {
|
||||
if ((object == null) || !(object instanceof RouterInfo)) return false;
|
||||
RouterInfo info = (RouterInfo) object;
|
||||
return DataHelper.eq(_addresses, info.getAddresses())
|
||||
&& DataHelper.eq(_identity, info.getIdentity())
|
||||
&& DataHelper.eq(_options, info.getOptions())
|
||||
&& DataHelper.eq(_peers, info.getPeers())
|
||||
return DataHelper.eq(_identity, info.getIdentity())
|
||||
&& DataHelper.eq(_signature, info.getSignature())
|
||||
&& DataHelper.eq(getPublished(), info.getPublished());
|
||||
&& DataHelper.eq(getPublished(), info.getPublished())
|
||||
&& DataHelper.eq(_addresses, info.getAddresses())
|
||||
&& DataHelper.eq(_options, info.getOptions())
|
||||
&& DataHelper.eq(_peers, info.getPeers());
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
if (!_hashCodeInitialized) {
|
||||
_hashCode = DataHelper.hashCode(_addresses) + DataHelper.hashCode(_identity)
|
||||
+ DataHelper.hashCode(_options) + DataHelper.hashCode(_peers)
|
||||
+ DataHelper.hashCode(_signature) + (int) getPublished();
|
||||
_hashCode = DataHelper.hashCode(_identity) + (int) getPublished();
|
||||
_hashCodeInitialized = true;
|
||||
}
|
||||
return _hashCode;
|
||||
|
@ -34,6 +34,8 @@ public class TunnelId extends DataStructureImpl {
|
||||
public final static int TYPE_OUTBOUND = 2;
|
||||
public final static int TYPE_PARTICIPANT = 3;
|
||||
|
||||
public static final TunnelId INVALID = new TunnelId(0, true);
|
||||
|
||||
public TunnelId() {
|
||||
_tunnelId = -1;
|
||||
_type = TYPE_UNSPECIFIED;
|
||||
@ -48,6 +50,9 @@ public class TunnelId extends DataStructureImpl {
|
||||
_tunnelId = id;
|
||||
_type = type;
|
||||
}
|
||||
private TunnelId(long id, boolean forceInvalid) {
|
||||
_tunnelId = id;
|
||||
}
|
||||
|
||||
public long getTunnelId() { return _tunnelId; }
|
||||
public void setTunnelId(long id) {
|
||||
@ -87,7 +92,5 @@ public class TunnelId extends DataStructureImpl {
|
||||
return (int)getTunnelId();
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
return "[TunnelID: " + getTunnelId() + "]";
|
||||
}
|
||||
public String toString() { return String.valueOf(getTunnelId()); }
|
||||
}
|
||||
|
@ -18,7 +18,7 @@ import java.util.List;
|
||||
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.RouterIdentity;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
@ -53,7 +53,7 @@ public class RequestLeaseSetMessage extends I2CPMessageImpl {
|
||||
return _endpoints.size();
|
||||
}
|
||||
|
||||
public RouterIdentity getRouter(int endpoint) {
|
||||
public Hash getRouter(int endpoint) {
|
||||
if ((endpoint < 0) || (_endpoints.size() < endpoint)) return null;
|
||||
return ((TunnelEndpoint) _endpoints.get(endpoint)).getRouter();
|
||||
}
|
||||
@ -67,7 +67,9 @@ public class RequestLeaseSetMessage extends I2CPMessageImpl {
|
||||
if ((endpoint >= 0) && (endpoint < _endpoints.size())) _endpoints.remove(endpoint);
|
||||
}
|
||||
|
||||
public void addEndpoint(RouterIdentity router, TunnelId tunnel) {
|
||||
public void addEndpoint(Hash router, TunnelId tunnel) {
|
||||
if (router == null) throw new IllegalArgumentException("Null router (tunnel=" + tunnel +")");
|
||||
if (tunnel == null) throw new IllegalArgumentException("Null tunnel (router=" + router +")");
|
||||
_endpoints.add(new TunnelEndpoint(router, tunnel));
|
||||
}
|
||||
|
||||
@ -86,7 +88,7 @@ public class RequestLeaseSetMessage extends I2CPMessageImpl {
|
||||
int numTunnels = (int) DataHelper.readLong(in, 1);
|
||||
_endpoints.clear();
|
||||
for (int i = 0; i < numTunnels; i++) {
|
||||
RouterIdentity router = new RouterIdentity();
|
||||
Hash router = new Hash();
|
||||
router.readBytes(in);
|
||||
TunnelId tunnel = new TunnelId();
|
||||
tunnel.readBytes(in);
|
||||
@ -106,7 +108,7 @@ public class RequestLeaseSetMessage extends I2CPMessageImpl {
|
||||
_sessionId.writeBytes(os);
|
||||
DataHelper.writeLong(os, 1, _endpoints.size());
|
||||
for (int i = 0; i < _endpoints.size(); i++) {
|
||||
RouterIdentity router = getRouter(i);
|
||||
Hash router = getRouter(i);
|
||||
router.writeBytes(os);
|
||||
TunnelId tunnel = getTunnelId(i);
|
||||
tunnel.writeBytes(os);
|
||||
@ -151,7 +153,7 @@ public class RequestLeaseSetMessage extends I2CPMessageImpl {
|
||||
}
|
||||
|
||||
private class TunnelEndpoint {
|
||||
private RouterIdentity _router;
|
||||
private Hash _router;
|
||||
private TunnelId _tunnelId;
|
||||
|
||||
public TunnelEndpoint() {
|
||||
@ -159,16 +161,16 @@ public class RequestLeaseSetMessage extends I2CPMessageImpl {
|
||||
_tunnelId = null;
|
||||
}
|
||||
|
||||
public TunnelEndpoint(RouterIdentity router, TunnelId id) {
|
||||
public TunnelEndpoint(Hash router, TunnelId id) {
|
||||
_router = router;
|
||||
_tunnelId = id;
|
||||
}
|
||||
|
||||
public RouterIdentity getRouter() {
|
||||
public Hash getRouter() {
|
||||
return _router;
|
||||
}
|
||||
|
||||
public void setRouter(RouterIdentity router) {
|
||||
public void setRouter(Hash router) {
|
||||
_router = router;
|
||||
}
|
||||
|
||||
|
@ -33,6 +33,8 @@ import java.io.InterruptedIOException;
|
||||
import java.net.DatagramPacket;
|
||||
import java.net.DatagramSocket;
|
||||
import java.net.InetAddress;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
|
||||
|
||||
/**
|
||||
@ -62,8 +64,12 @@ public class NtpClient {
|
||||
public static long currentTime(String serverNames[]) {
|
||||
if (serverNames == null)
|
||||
throw new IllegalArgumentException("No NTP servers specified");
|
||||
for (int i = 0; i < serverNames.length; i++) {
|
||||
long now = currentTime(serverNames[i]);
|
||||
ArrayList names = new ArrayList(serverNames.length);
|
||||
for (int i = 0; i < serverNames.length; i++)
|
||||
names.add(serverNames[i]);
|
||||
Collections.shuffle(names);
|
||||
for (int i = 0; i < names.size(); i++) {
|
||||
long now = currentTime((String)names.get(i));
|
||||
if (now > 0)
|
||||
return now;
|
||||
}
|
||||
@ -112,8 +118,9 @@ public class NtpClient {
|
||||
(msg.transmitTimestamp - destinationTimestamp)) / 2;
|
||||
socket.close();
|
||||
|
||||
//System.out.println("host: " + serverName + " rtt: " + roundTripDelay + " offset: " + localClockOffset + " seconds");
|
||||
return (long)(System.currentTimeMillis() + localClockOffset*1000);
|
||||
long rv = (long)(System.currentTimeMillis() + localClockOffset*1000);
|
||||
//System.out.println("host: " + address.getHostAddress() + " rtt: " + roundTripDelay + " offset: " + localClockOffset + " seconds");
|
||||
return rv;
|
||||
} catch (IOException ioe) {
|
||||
//ioe.printStackTrace();
|
||||
return -1;
|
||||
|
@ -20,17 +20,25 @@ public class Timestamper implements Runnable {
|
||||
private List _servers;
|
||||
private List _listeners;
|
||||
private int _queryFrequency;
|
||||
private int _concurringServers;
|
||||
private volatile boolean _disabled;
|
||||
private boolean _daemon;
|
||||
private boolean _initialized;
|
||||
|
||||
private static final int DEFAULT_QUERY_FREQUENCY = 5*60*1000;
|
||||
private static final String DEFAULT_SERVER_LIST = "pool.ntp.org, pool.ntp.org";
|
||||
private static final boolean DEFAULT_DISABLED = true;
|
||||
/** how many times do we have to query if we are changing the clock? */
|
||||
private static final int DEFAULT_CONCURRING_SERVERS = 2;
|
||||
|
||||
public static final String PROP_QUERY_FREQUENCY = "time.queryFrequencyMs";
|
||||
public static final String PROP_SERVER_LIST = "time.sntpServerList";
|
||||
public static final String PROP_DISABLED = "time.disabled";
|
||||
public static final String PROP_CONCURRING_SERVERS = "time.concurringServers";
|
||||
|
||||
/** if different SNTP servers differ by more than 10s, someone is b0rked */
|
||||
private static final int MAX_VARIANCE = 10*1000;
|
||||
|
||||
public Timestamper(I2PAppContext ctx) {
|
||||
this(ctx, null, true);
|
||||
}
|
||||
@ -41,6 +49,7 @@ public class Timestamper implements Runnable {
|
||||
public Timestamper(I2PAppContext ctx, UpdateListener lsnr, boolean daemon) {
|
||||
_context = ctx;
|
||||
_daemon = daemon;
|
||||
_initialized = false;
|
||||
_servers = new ArrayList(1);
|
||||
_listeners = new ArrayList(1);
|
||||
if (lsnr != null)
|
||||
@ -114,10 +123,7 @@ public class Timestamper implements Runnable {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Querying servers " + _servers);
|
||||
try {
|
||||
long now = NtpClient.currentTime(serverList);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Stamp time");
|
||||
stampTime(now);
|
||||
queryTime(serverList);
|
||||
} catch (IllegalArgumentException iae) {
|
||||
if (!alreadyBitched)
|
||||
_log.log(Log.CRIT, "Unable to reach any of the NTP servers - network disconnected?");
|
||||
@ -132,6 +138,35 @@ public class Timestamper implements Runnable {
|
||||
}
|
||||
}
|
||||
|
||||
private void queryTime(String serverList[]) throws IllegalArgumentException {
|
||||
long localTime = -1;
|
||||
long now = -1;
|
||||
long expectedDelta = 0;
|
||||
for (int i = 0; i < _concurringServers; i++) {
|
||||
localTime = _context.clock().now();
|
||||
now = NtpClient.currentTime(serverList);
|
||||
|
||||
long delta = now - localTime;
|
||||
if (i == 0) {
|
||||
if (Math.abs(delta) < MAX_VARIANCE) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("a single SNTP query was within the tolerance (" + delta + "ms)");
|
||||
return;
|
||||
} else {
|
||||
// outside the tolerance, lets iterate across the concurring queries
|
||||
expectedDelta = delta;
|
||||
}
|
||||
} else {
|
||||
if (Math.abs(delta - expectedDelta) > MAX_VARIANCE) {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("SNTP client variance exceeded at query " + i + ". expected = " + expectedDelta + ", found = " + delta);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
stampTime(now);
|
||||
}
|
||||
|
||||
/**
|
||||
* Send an HTTP request to a given URL specifying the current time
|
||||
*/
|
||||
@ -142,6 +177,8 @@ public class Timestamper implements Runnable {
|
||||
lsnr.setNow(now);
|
||||
}
|
||||
}
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Stamped the time as " + now);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -185,6 +222,31 @@ public class Timestamper implements Runnable {
|
||||
if (disabled == null)
|
||||
disabled = DEFAULT_DISABLED + "";
|
||||
_disabled = Boolean.valueOf(disabled).booleanValue();
|
||||
|
||||
String concurring = _context.getProperty(PROP_CONCURRING_SERVERS);
|
||||
if (concurring == null) {
|
||||
_concurringServers = DEFAULT_CONCURRING_SERVERS;
|
||||
} else {
|
||||
try {
|
||||
int servers = Integer.parseInt(concurring);
|
||||
if ( (servers > 0) && (servers < 5) )
|
||||
_concurringServers = servers;
|
||||
else
|
||||
_concurringServers = DEFAULT_CONCURRING_SERVERS;
|
||||
} catch (NumberFormatException nfe) {
|
||||
_concurringServers = DEFAULT_CONCURRING_SERVERS;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static void main(String args[]) {
|
||||
System.setProperty(PROP_DISABLED, "false");
|
||||
System.setProperty(PROP_QUERY_FREQUENCY, "30000");
|
||||
I2PAppContext ctx = I2PAppContext.getGlobalContext();
|
||||
long now = ctx.clock().now();
|
||||
for (int i = 0; i < 5*60*1000; i += 61*1000) {
|
||||
try { Thread.sleep(61*1000); } catch (InterruptedException ie) {}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -18,6 +18,7 @@ public class Clock implements Timestamper.UpdateListener {
|
||||
private I2PAppContext _context;
|
||||
private Timestamper _timestamper;
|
||||
private long _startedOn;
|
||||
private boolean _statCreated;
|
||||
|
||||
public Clock(I2PAppContext context) {
|
||||
_context = context;
|
||||
@ -26,6 +27,7 @@ public class Clock implements Timestamper.UpdateListener {
|
||||
_listeners = new HashSet(64);
|
||||
_timestamper = new Timestamper(context, this);
|
||||
_startedOn = System.currentTimeMillis();
|
||||
_statCreated = false;
|
||||
}
|
||||
public static Clock getInstance() {
|
||||
return I2PAppContext.getGlobalContext().clock();
|
||||
@ -78,10 +80,15 @@ public class Clock implements Timestamper.UpdateListener {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (_alreadyChanged)
|
||||
if (_alreadyChanged) {
|
||||
getLog().log(Log.CRIT, "Updating clock offset to " + offsetMs + "ms from " + _offset + "ms");
|
||||
else
|
||||
if (!_statCreated)
|
||||
_context.statManager().createRateStat("clock.skew", "How far is the already adjusted clock being skewed?", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
|
||||
_statCreated = true;
|
||||
_context.statManager().addRateData("clock.skew", delta, 0);
|
||||
} else {
|
||||
getLog().log(Log.INFO, "Initializing clock offset to " + offsetMs + "ms from " + _offset + "ms");
|
||||
}
|
||||
_alreadyChanged = true;
|
||||
_offset = offsetMs;
|
||||
fireOffsetChanged(delta);
|
||||
|
@ -97,6 +97,7 @@ public class NativeBigInteger extends BigInteger {
|
||||
private final static String JBIGI_OPTIMIZATION_K6_2 = "k62";
|
||||
private final static String JBIGI_OPTIMIZATION_K6_3 = "k63";
|
||||
private final static String JBIGI_OPTIMIZATION_ATHLON = "athlon";
|
||||
private final static String JBIGI_OPTIMIZATION_ATHLON64 = "athlon64";
|
||||
private final static String JBIGI_OPTIMIZATION_PENTIUM = "pentium";
|
||||
private final static String JBIGI_OPTIMIZATION_PENTIUMMMX = "pentiummmx";
|
||||
private final static String JBIGI_OPTIMIZATION_PENTIUM2 = "pentium2";
|
||||
@ -130,6 +131,8 @@ public class NativeBigInteger extends BigInteger {
|
||||
CPUInfo c = CPUID.getInfo();
|
||||
if (c instanceof AMDCPUInfo) {
|
||||
AMDCPUInfo amdcpu = (AMDCPUInfo) c;
|
||||
if (amdcpu.IsAthlon64Compatible())
|
||||
return JBIGI_OPTIMIZATION_ATHLON64;
|
||||
if (amdcpu.IsAthlonCompatible())
|
||||
return JBIGI_OPTIMIZATION_ATHLON;
|
||||
if (amdcpu.IsK6_3_Compatible())
|
||||
|
@ -49,7 +49,9 @@ public class SimpleTimer {
|
||||
*
|
||||
*/
|
||||
public void addEvent(TimedEvent event, long timeoutMs) {
|
||||
long eventTime = System.currentTimeMillis() + timeoutMs;
|
||||
int totalEvents = 0;
|
||||
long now = System.currentTimeMillis();
|
||||
long eventTime = now + timeoutMs;
|
||||
Long time = new Long(eventTime);
|
||||
synchronized (_events) {
|
||||
// remove the old scheduled position, then reinsert it
|
||||
@ -72,8 +74,20 @@ public class SimpleTimer {
|
||||
}
|
||||
}
|
||||
|
||||
totalEvents = _events.size();
|
||||
_events.notifyAll();
|
||||
}
|
||||
if (time.longValue() > eventTime + 5) {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Lots of timer congestion, had to push " + event + " back "
|
||||
+ (time.longValue()-eventTime) + "ms (# events: " + totalEvents + ")");
|
||||
}
|
||||
long timeToAdd = System.currentTimeMillis() - now;
|
||||
if (timeToAdd > 50) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("timer contention: took " + timeToAdd + "ms to add a job");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public boolean removeEvent(TimedEvent evt) {
|
||||
|
@ -13,7 +13,7 @@ import java.util.Date;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataStructure;
|
||||
import net.i2p.data.Lease;
|
||||
import net.i2p.data.RouterIdentity;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.TunnelId;
|
||||
|
||||
/**
|
||||
@ -27,12 +27,11 @@ class LeaseTest extends StructureTest {
|
||||
}
|
||||
public DataStructure createDataStructure() throws DataFormatException {
|
||||
Lease lease = new Lease();
|
||||
StructureTest tst = new DestinationTest();
|
||||
lease.setEndDate(new Date(1000*60*2));
|
||||
//lease.setStartDate(new Date(1000*60));
|
||||
tst = new RouterIdentityTest();
|
||||
lease.setRouterIdentity((RouterIdentity)tst.createDataStructure());
|
||||
tst = new TunnelIdTest();
|
||||
byte h[] = new byte[Hash.HASH_LENGTH];
|
||||
lease.setGateway(new Hash(h));
|
||||
StructureTest tst = new TunnelIdTest();
|
||||
lease.setTunnelId((TunnelId)tst.createDataStructure());
|
||||
|
||||
return lease;
|
||||
|
@ -12,7 +12,7 @@ import java.util.Date;
|
||||
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataStructure;
|
||||
import net.i2p.data.RouterIdentity;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.data.i2cp.RequestLeaseSetMessage;
|
||||
import net.i2p.data.i2cp.SessionId;
|
||||
@ -30,8 +30,8 @@ class RequestLeaseSetMessageTest extends StructureTest {
|
||||
RequestLeaseSetMessage msg = new RequestLeaseSetMessage();
|
||||
msg.setSessionId((SessionId)(new SessionIdTest()).createDataStructure());
|
||||
msg.setEndDate(new Date(1000*60*60*12));
|
||||
msg.addEndpoint((RouterIdentity)(new RouterIdentityTest()).createDataStructure(),
|
||||
(TunnelId)(new TunnelIdTest()).createDataStructure());
|
||||
byte h[] = new byte[Hash.HASH_LENGTH];
|
||||
msg.addEndpoint(new Hash(h), (TunnelId)(new TunnelIdTest()).createDataStructure());
|
||||
return msg;
|
||||
}
|
||||
public DataStructure createStructureToRead() { return new RequestLeaseSetMessage(); }
|
||||
|
58
history.txt
58
history.txt
@ -1,4 +1,60 @@
|
||||
$Id: history.txt,v 1.140 2005/02/09 14:28:29 duck Exp $
|
||||
$Id: history.txt,v 1.141 2005/02/10 21:44:49 smeghead Exp $
|
||||
|
||||
2005-02-16 jrandom
|
||||
* (Merged the 0.5-pre branch back into CVS HEAD)
|
||||
* Replaced the old tunnel routing crypto with the one specified in
|
||||
router/doc/tunnel-alt.html, including updates to the web console to view
|
||||
and tweak it.
|
||||
* Provide the means for routers to reject tunnel requests with a wider
|
||||
range of responses:
|
||||
probabalistic rejection, due to approaching overload
|
||||
transient rejection, due to temporary overload
|
||||
bandwidth rejection, due to persistent bandwidth overload
|
||||
critical rejection, due to general router fault (or imminent shutdown)
|
||||
The different responses are factored into the profiles accordingly.
|
||||
* Replaced the old I2CP tunnel related options (tunnels.depthInbound, etc)
|
||||
with a series of new properties, relevent to the new tunnel routing code:
|
||||
inbound.nickname (used on the console)
|
||||
inbound.quantity (# of tunnels to use in any leaseSets)
|
||||
inbound.backupQuantity (# of tunnels to keep in the ready)
|
||||
inbound.length (# of remote peers in the tunnel)
|
||||
inbound.lengthVariance (if > 0, permute the length by adding a random #
|
||||
up to the variance. if < 0, permute the length
|
||||
by adding or subtracting a random # up to the
|
||||
variance)
|
||||
outbound.* (same as the inbound, except for the, uh, outbound tunnels
|
||||
in that client's pool)
|
||||
There are other options, and more will be added later, but the above are
|
||||
the most relevent ones.
|
||||
* Replaced Jetty 4.2.21 with Jetty 5.1.2
|
||||
* Compress all profile data on disk.
|
||||
* Adjust the reseeding functionality to work even when the JVM's http proxy
|
||||
is set.
|
||||
* Enable a poor-man's interactive-flow in the streaming lib by choking the
|
||||
max window size.
|
||||
* Reduced the default streaming lib max message size to 16KB (though still
|
||||
configurable by the user), also doubling the default maximum window
|
||||
size.
|
||||
* Replaced the RouterIdentity in a Lease with its SHA256 hash.
|
||||
* Reduced the overall I2NP message checksum from a full 32 byte SHA256 to
|
||||
the first byte of the SHA256.
|
||||
* Added a new "netId" flag to let routers drop references to other routers
|
||||
who we won't be able to talk to.
|
||||
* Extended the timestamper to get a second (or third) opinion whenever it
|
||||
wants to actually adjust the clock offset.
|
||||
* Replaced that kludge of a timestamp I2NP message with a full blown
|
||||
DateMessage.
|
||||
* Substantial memory optimizations within the router and the SDK to reduce
|
||||
GC churn. Client apps and the streaming libs have not been tuned,
|
||||
however.
|
||||
* More bugfixes thank you can shake a stick at.
|
||||
|
||||
2005-02-13 jrandom
|
||||
* Updated jbigi source to handle 64bit CPUs. The bundled jbigi.jar still
|
||||
only contains 32bit versions, so build your own, placing libjbigi.so in
|
||||
your install dir if necessary. (thanks mule!)
|
||||
* Added support for libjbigi-$os-athlon64 to NativeBigInteger and CPUID
|
||||
(thanks spaetz!)
|
||||
|
||||
2005-02-10 smeghead
|
||||
* Initial check-in of Pants, a new utility to help us manage our 3rd-party
|
||||
|
@ -4,7 +4,7 @@ tunnel.0.description=HTTP proxy for browsing eepsites and the web
|
||||
tunnel.0.type=httpclient
|
||||
tunnel.0.interface=127.0.0.1
|
||||
tunnel.0.listenPort=4444
|
||||
tunnel.0.proxyList=squid.i2p,www1.squid.i2p
|
||||
tunnel.0.proxyList=squid.i2p
|
||||
tunnel.0.i2cpHost=127.0.0.1
|
||||
tunnel.0.i2cpPort=7654
|
||||
tunnel.0.option.tunnels.depthInbound=2
|
||||
@ -24,6 +24,7 @@ tunnel.1.i2cpPort=7654
|
||||
tunnel.1.option.tunnels.depthInbound=2
|
||||
tunnel.1.option.tunnels.numInbound=2
|
||||
tunnel.1.option.i2p.streaming.connectDelay=1000
|
||||
tunnel.1.option.i2p.maxWindowSize=1
|
||||
tunnel.1.startOnLoad=true
|
||||
|
||||
# I2P's cvs server
|
||||
|
@ -136,6 +136,17 @@
|
||||
</New>
|
||||
</Arg>
|
||||
</Call>
|
||||
|
||||
<Call name="addContext">
|
||||
<Arg>/cgi-bin/*</Arg>
|
||||
<Set name="ResourceBase">./eepsite/cgi-bin</Set>
|
||||
<Call name="addServlet">
|
||||
<Arg>Common Gateway Interface</Arg>
|
||||
<Arg>/</Arg>
|
||||
<Arg>org.mortbay.servlet.CGI</Arg>
|
||||
<Put name="Path">/usr/local/bin:/usr/ucb:/bin:/usr/bin</Put>
|
||||
</Call>
|
||||
</Call>
|
||||
|
||||
<!-- =============================================================== -->
|
||||
<!-- Configure the Request Log -->
|
||||
|
@ -12,27 +12,36 @@ wrapper.java.mainclass=org.tanukisoftware.wrapper.WrapperSimpleApp
|
||||
|
||||
# Java Classpath (include wrapper.jar) Add class path elements as
|
||||
# needed starting from 1
|
||||
wrapper.java.classpath.1=lib/ant.jar
|
||||
wrapper.java.classpath.2=lib/heartbeat.jar
|
||||
wrapper.java.classpath.3=lib/i2p.jar
|
||||
wrapper.java.classpath.4=lib/i2ptunnel.jar
|
||||
wrapper.java.classpath.5=lib/jasper-compiler.jar
|
||||
wrapper.java.classpath.6=lib/jasper-runtime.jar
|
||||
wrapper.java.classpath.7=lib/javax.servlet.jar
|
||||
wrapper.java.classpath.8=lib/jnet.jar
|
||||
wrapper.java.classpath.9=lib/mstreaming.jar
|
||||
wrapper.java.classpath.10=lib/netmonitor.jar
|
||||
wrapper.java.classpath.11=lib/org.mortbay.jetty.jar
|
||||
wrapper.java.classpath.12=lib/router.jar
|
||||
wrapper.java.classpath.13=lib/routerconsole.jar
|
||||
wrapper.java.classpath.14=lib/sam.jar
|
||||
wrapper.java.classpath.15=lib/wrapper.jar
|
||||
# i2p sdk, public domain/BSD/Cryptix
|
||||
wrapper.java.classpath.1=lib/i2p.jar
|
||||
# router, depends on i2p.jar, public domain
|
||||
wrapper.java.classpath.2=lib/router.jar
|
||||
# compiled jbigi libraries, contains static libGMP, lgpl
|
||||
wrapper.java.classpath.3=lib/jbigi.jar
|
||||
# sam bridge, public domain (depends on i2p.jar)
|
||||
wrapper.java.classpath.4=lib/sam.jar
|
||||
# ministreaming lib -interfaces for streaming, BSD (depends on i2p.jar)
|
||||
wrapper.java.classpath.5=lib/mstreaming.jar
|
||||
# full streaming lib, public domain (depends on mstreaming.jar, i2p.jar)
|
||||
wrapper.java.classpath.6=lib/streaming.jar
|
||||
# router console, public domain (depends on i2p.jar, router.jar)
|
||||
wrapper.java.classpath.7=lib/routerconsole.jar
|
||||
# i2ptunnel, GPL (depends on mstreaming.jar, i2p.jar)
|
||||
wrapper.java.classpath.8=lib/i2ptunnel.jar
|
||||
# jetty libraries (and dependencies), apache licensed
|
||||
wrapper.java.classpath.9=lib/org.mortbay.jetty.jar
|
||||
wrapper.java.classpath.10=lib/javax.servlet.jar
|
||||
wrapper.java.classpath.11=lib/jasper-compiler.jar
|
||||
wrapper.java.classpath.12=lib/jasper-runtime.jar
|
||||
wrapper.java.classpath.13=lib/commons-logging.jar
|
||||
wrapper.java.classpath.14=lib/commons-el.jar
|
||||
wrapper.java.classpath.15=lib/ant.jar
|
||||
wrapper.java.classpath.16=lib/xercesImpl.jar
|
||||
wrapper.java.classpath.17=lib/xml-apis.jar
|
||||
wrapper.java.classpath.18=lib/jbigi.jar
|
||||
wrapper.java.classpath.19=lib/systray.jar
|
||||
wrapper.java.classpath.20=lib/systray4j.jar
|
||||
wrapper.java.classpath.21=lib/streaming.jar
|
||||
# java service wrapper, BSD
|
||||
wrapper.java.classpath.17=lib/wrapper.jar
|
||||
# systray, LGPL
|
||||
wrapper.java.classpath.18=lib/systray.jar
|
||||
wrapper.java.classpath.19=lib/systray4j.jar
|
||||
|
||||
# Java Library Path (location of Wrapper.DLL or libwrapper.so)
|
||||
wrapper.java.library.path.1=.
|
||||
@ -41,6 +50,7 @@ wrapper.java.library.path.2=lib
|
||||
# Java Additional Parameters
|
||||
wrapper.java.additional.1=-DloggerFilenameOverride=logs/log-router-@.txt
|
||||
wrapper.java.additional.2=-Dorg.mortbay.http.Version.paranoid=true
|
||||
wrapper.java.additional.3=-Dorg.mortbay.util.FileResource.checkAliases=false
|
||||
|
||||
# Initial Java Heap Size (in MB)
|
||||
#wrapper.java.initmemory=4
|
||||
|
@ -28,19 +28,19 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(DeliveryStatusMessage.class);
|
||||
public final static int MESSAGE_TYPE = 10;
|
||||
private long _id;
|
||||
private Date _arrival;
|
||||
private long _arrival;
|
||||
|
||||
public DeliveryStatusMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
setMessageId(-1);
|
||||
setArrival(null);
|
||||
setArrival(-1);
|
||||
}
|
||||
|
||||
public long getMessageId() { return _id; }
|
||||
public void setMessageId(long id) { _id = id; }
|
||||
|
||||
public Date getArrival() { return _arrival; }
|
||||
public void setArrival(Date arrival) { _arrival = arrival; }
|
||||
public long getArrival() { return _arrival; }
|
||||
public void setArrival(long arrival) { _arrival = arrival; }
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
@ -48,11 +48,7 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
|
||||
|
||||
_id = DataHelper.fromLong(data, curIndex, 4);
|
||||
curIndex += 4;
|
||||
try {
|
||||
_arrival = DataHelper.fromDate(data, curIndex);
|
||||
} catch (DataFormatException dfe) {
|
||||
throw new I2NPMessageException("Unable to read the arrival");
|
||||
}
|
||||
_arrival = DataHelper.fromLong(data, curIndex, DataHelper.DATE_LENGTH);
|
||||
}
|
||||
|
||||
/** calculate the message body's length (not including the header and footer */
|
||||
@ -61,13 +57,12 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
|
||||
}
|
||||
/** write the message body to the output array, starting at the given index */
|
||||
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
|
||||
if ( (_id < 0) || (_arrival == null) ) throw new I2NPMessageException("Not enough data to write out");
|
||||
if ( (_id < 0) || (_arrival <= 0) ) throw new I2NPMessageException("Not enough data to write out");
|
||||
|
||||
byte id[] = DataHelper.toLong(4, _id);
|
||||
System.arraycopy(id, 0, out, curIndex, 4);
|
||||
curIndex += 4;
|
||||
byte date[] = DataHelper.toDate(_arrival);
|
||||
System.arraycopy(date, 0, out, curIndex, DataHelper.DATE_LENGTH);
|
||||
DataHelper.toLong(out, curIndex, DataHelper.DATE_LENGTH, _arrival);
|
||||
curIndex += DataHelper.DATE_LENGTH;
|
||||
return curIndex;
|
||||
}
|
||||
@ -75,8 +70,7 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
public int hashCode() {
|
||||
return (int)getMessageId() +
|
||||
DataHelper.hashCode(getArrival());
|
||||
return (int)getMessageId() + (int)getArrival();
|
||||
}
|
||||
|
||||
public boolean equals(Object object) {
|
||||
@ -93,7 +87,7 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[DeliveryStatusMessage: ");
|
||||
buf.append("\n\tMessage ID: ").append(getMessageId());
|
||||
buf.append("\n\tArrival: ").append(_context.clock().now() - _arrival.getTime());
|
||||
buf.append("\n\tArrival: ").append(_context.clock().now() - _arrival);
|
||||
buf.append("ms in the past");
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
|
@ -126,7 +126,16 @@ public class GarlicClove extends DataStructureImpl {
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Wrote instructions: " + _instructions);
|
||||
out.write(_msg.toByteArray());
|
||||
try {
|
||||
byte m[] = _msg.toByteArray();
|
||||
if (m == null)
|
||||
throw new RuntimeException("foo, returned null");
|
||||
if (m.length <= 0)
|
||||
throw new RuntimeException("foo, returned 0 length");
|
||||
out.write(m);
|
||||
} catch (Exception e) {
|
||||
throw new DataFormatException("Unable to write the clove: " + _msg + " to " + out, e);
|
||||
}
|
||||
DataHelper.writeLong(out, 4, _cloveId);
|
||||
DataHelper.writeDate(out, _expiration);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
@ -137,6 +146,14 @@ public class GarlicClove extends DataStructureImpl {
|
||||
_log.debug("Written cert: " + _certificate);
|
||||
}
|
||||
|
||||
public int estimateSize() {
|
||||
return 64 // instructions (high estimate)
|
||||
+ _msg.getMessageSize()
|
||||
+ 4 // cloveId
|
||||
+ DataHelper.DATE_LENGTH
|
||||
+ 4; // certificate
|
||||
}
|
||||
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof GarlicClove))
|
||||
return false;
|
||||
|
@ -10,7 +10,6 @@ package net.i2p.data.i2np;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.Date;
|
||||
|
||||
import net.i2p.data.DataStructure;
|
||||
|
||||
@ -67,7 +66,7 @@ public interface I2NPMessage extends DataStructure {
|
||||
* Date after which the message should be dropped (and the associated uniqueId forgotten)
|
||||
*
|
||||
*/
|
||||
public Date getMessageExpiration();
|
||||
public long getMessageExpiration();
|
||||
|
||||
/** How large the message is, including any checksums */
|
||||
public int getMessageSize();
|
||||
|
@ -132,10 +132,14 @@ public class I2NPMessageHandler {
|
||||
return new DatabaseSearchReplyMessage(_context);
|
||||
case DeliveryStatusMessage.MESSAGE_TYPE:
|
||||
return new DeliveryStatusMessage(_context);
|
||||
case DateMessage.MESSAGE_TYPE:
|
||||
return new DateMessage(_context);
|
||||
case GarlicMessage.MESSAGE_TYPE:
|
||||
return new GarlicMessage(_context);
|
||||
case TunnelMessage.MESSAGE_TYPE:
|
||||
return new TunnelMessage(_context);
|
||||
case TunnelDataMessage.MESSAGE_TYPE:
|
||||
return new TunnelDataMessage(_context);
|
||||
case TunnelGatewayMessage.MESSAGE_TYPE:
|
||||
return new TunnelGatewayMessage(_context);
|
||||
case DataMessage.MESSAGE_TYPE:
|
||||
return new DataMessage(_context);
|
||||
case TunnelCreateMessage.MESSAGE_TYPE:
|
||||
|
@ -12,7 +12,6 @@ import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.util.Date;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.crypto.SHA256EntryCache;
|
||||
@ -30,19 +29,20 @@ import net.i2p.util.Log;
|
||||
public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPMessage {
|
||||
private Log _log;
|
||||
protected I2PAppContext _context;
|
||||
private Date _expiration;
|
||||
private long _expiration;
|
||||
private long _uniqueId;
|
||||
private byte _data[];
|
||||
|
||||
public final static long DEFAULT_EXPIRATION_MS = 1*60*1000; // 1 minute by default
|
||||
public final static int CHECKSUM_LENGTH = 1; //Hash.HASH_LENGTH;
|
||||
|
||||
public I2NPMessageImpl(I2PAppContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(I2NPMessageImpl.class);
|
||||
_expiration = new Date(_context.clock().now() + DEFAULT_EXPIRATION_MS);
|
||||
_expiration = _context.clock().now() + DEFAULT_EXPIRATION_MS;
|
||||
_uniqueId = _context.random().nextLong(MAX_ID_VALUE);
|
||||
_context.statManager().createRateStat("i2np.writeTime", "How long it takes to write an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
|
||||
_context.statManager().createRateStat("i2np.readTime", "How long it takes to read an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
|
||||
//_context.statManager().createRateStat("i2np.writeTime", "How long it takes to write an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
|
||||
//_context.statManager().createRateStat("i2np.readTime", "How long it takes to read an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
|
||||
}
|
||||
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
@ -57,10 +57,14 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
if (type < 0)
|
||||
type = (int)DataHelper.readLong(in, 1);
|
||||
_uniqueId = DataHelper.readLong(in, 4);
|
||||
_expiration = DataHelper.readDate(in);
|
||||
_expiration = DataHelper.readLong(in, DataHelper.DATE_LENGTH);
|
||||
int size = (int)DataHelper.readLong(in, 2);
|
||||
Hash h = new Hash();
|
||||
h.readBytes(in);
|
||||
byte checksum[] = new byte[CHECKSUM_LENGTH];
|
||||
int read = DataHelper.read(in, checksum);
|
||||
if (read != CHECKSUM_LENGTH)
|
||||
throw new I2NPMessageException("checksum is too small [" + read + "]");
|
||||
//Hash h = new Hash();
|
||||
//h.readBytes(in);
|
||||
if (buffer.length < size) {
|
||||
if (size > 64*1024) throw new I2NPMessageException("size=" + size);
|
||||
buffer = new byte[size];
|
||||
@ -77,18 +81,19 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
|
||||
SHA256EntryCache.CacheEntry cache = _context.sha().cache().acquire(size);
|
||||
Hash calc = _context.sha().calculateHash(buffer, 0, size, cache);
|
||||
boolean eq = calc.equals(h);
|
||||
//boolean eq = calc.equals(h);
|
||||
boolean eq = DataHelper.eq(checksum, 0, calc.getData(), 0, CHECKSUM_LENGTH);
|
||||
_context.sha().cache().release(cache);
|
||||
if (!eq)
|
||||
throw new I2NPMessageException("Hash does not match");
|
||||
throw new I2NPMessageException("Hash does not match for " + getClass().getName());
|
||||
|
||||
long start = _context.clock().now();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
|
||||
readMessage(buffer, 0, size, type);
|
||||
long time = _context.clock().now() - start;
|
||||
if (time > 50)
|
||||
_context.statManager().addRateData("i2np.readTime", time, time);
|
||||
//if (time > 50)
|
||||
// _context.statManager().addRateData("i2np.readTime", time, time);
|
||||
return size + Hash.HASH_LENGTH + 1 + 4 + DataHelper.DATE_LENGTH;
|
||||
} catch (DataFormatException dfe) {
|
||||
throw new I2NPMessageException("Error reading the message header", dfe);
|
||||
@ -102,19 +107,15 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
}
|
||||
_uniqueId = DataHelper.fromLong(data, cur, 4);
|
||||
cur += 4;
|
||||
try {
|
||||
_expiration = DataHelper.fromDate(data, cur);
|
||||
cur += DataHelper.DATE_LENGTH;
|
||||
} catch (DataFormatException dfe) {
|
||||
throw new I2NPMessageException("Unable to read the expiration", dfe);
|
||||
}
|
||||
_expiration = DataHelper.fromLong(data, cur, DataHelper.DATE_LENGTH);
|
||||
cur += DataHelper.DATE_LENGTH;
|
||||
int size = (int)DataHelper.fromLong(data, cur, 2);
|
||||
cur += 2;
|
||||
Hash h = new Hash();
|
||||
byte hdata[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, cur, hdata, 0, Hash.HASH_LENGTH);
|
||||
cur += Hash.HASH_LENGTH;
|
||||
h.setData(hdata);
|
||||
//Hash h = new Hash();
|
||||
byte hdata[] = new byte[CHECKSUM_LENGTH];
|
||||
System.arraycopy(data, cur, hdata, 0, CHECKSUM_LENGTH);
|
||||
cur += CHECKSUM_LENGTH;
|
||||
//h.setData(hdata);
|
||||
|
||||
if (cur + size > data.length)
|
||||
throw new I2NPMessageException("Payload is too short ["
|
||||
@ -125,10 +126,11 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
|
||||
SHA256EntryCache.CacheEntry cache = _context.sha().cache().acquire(size);
|
||||
Hash calc = _context.sha().calculateHash(data, cur, size, cache);
|
||||
boolean eq = calc.equals(h);
|
||||
//boolean eq = calc.equals(h);
|
||||
boolean eq = DataHelper.eq(hdata, 0, calc.getData(), 0, CHECKSUM_LENGTH);
|
||||
_context.sha().cache().release(cache);
|
||||
if (!eq)
|
||||
throw new I2NPMessageException("Hash does not match");
|
||||
throw new I2NPMessageException("Hash does not match for " + getClass().getName());
|
||||
|
||||
long start = _context.clock().now();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
@ -136,14 +138,14 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
readMessage(data, cur, size, type);
|
||||
cur += size;
|
||||
long time = _context.clock().now() - start;
|
||||
if (time > 50)
|
||||
_context.statManager().addRateData("i2np.readTime", time, time);
|
||||
//if (time > 50)
|
||||
// _context.statManager().addRateData("i2np.readTime", time, time);
|
||||
return cur - offset;
|
||||
}
|
||||
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
int size = getMessageSize();
|
||||
if (size < 47) throw new DataFormatException("Unable to build the message");
|
||||
if (size < 15 + CHECKSUM_LENGTH) throw new DataFormatException("Unable to build the message");
|
||||
byte buf[] = new byte[size];
|
||||
int read = toByteArray(buf);
|
||||
if (read < 0)
|
||||
@ -160,18 +162,19 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
* Date after which the message should be dropped (and the associated uniqueId forgotten)
|
||||
*
|
||||
*/
|
||||
public Date getMessageExpiration() { return _expiration; }
|
||||
public void setMessageExpiration(Date exp) { _expiration = exp; }
|
||||
public long getMessageExpiration() { return _expiration; }
|
||||
public void setMessageExpiration(long exp) { _expiration = exp; }
|
||||
|
||||
public synchronized int getMessageSize() {
|
||||
return calculateWrittenLength()+47; // 47 bytes in the header
|
||||
return calculateWrittenLength()+15 + CHECKSUM_LENGTH; // 47 bytes in the header
|
||||
}
|
||||
|
||||
public byte[] toByteArray() {
|
||||
byte data[] = new byte[getMessageSize()];
|
||||
int written = toByteArray(data);
|
||||
if (written != data.length) {
|
||||
_log.error("Error writing out " + data.length + " for " + getClass().getName());
|
||||
_log.log(Log.CRIT, "Error writing out " + data.length + " (written: " + written + ", msgSize: " + getMessageSize() +
|
||||
", writtenLen: " + calculateWrittenLength() + ") for " + getClass().getName());
|
||||
return null;
|
||||
}
|
||||
return data;
|
||||
@ -180,34 +183,44 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
public int toByteArray(byte buffer[]) {
|
||||
long start = _context.clock().now();
|
||||
|
||||
byte prefix[][] = new byte[][] { DataHelper.toLong(1, getType()),
|
||||
DataHelper.toLong(4, _uniqueId),
|
||||
DataHelper.toDate(_expiration),
|
||||
new byte[2],
|
||||
new byte[Hash.HASH_LENGTH]};
|
||||
byte suffix[][] = new byte[][] { };
|
||||
int prefixLen = 1 // type
|
||||
+ 4 // uniqueId
|
||||
+ DataHelper.DATE_LENGTH // expiration
|
||||
+ 2 // payload length
|
||||
+ CHECKSUM_LENGTH; // walnuts
|
||||
//byte prefix[][] = new byte[][] { DataHelper.toLong(1, getType()),
|
||||
// DataHelper.toLong(4, _uniqueId),
|
||||
// DataHelper.toLong(DataHelper.DATE_LENGTH, _expiration),
|
||||
// new byte[2],
|
||||
// new byte[CHECKSUM_LENGTH]};
|
||||
//byte suffix[][] = new byte[][] { };
|
||||
try {
|
||||
int writtenLen = toByteArray(buffer, prefix, suffix);
|
||||
int writtenLen = writeMessageBody(buffer, prefixLen);
|
||||
int payloadLen = writtenLen - prefixLen;
|
||||
SHA256EntryCache.CacheEntry cache = _context.sha().cache().acquire(payloadLen);
|
||||
Hash h = _context.sha().calculateHash(buffer, prefixLen, payloadLen, cache);
|
||||
|
||||
int prefixLen = 1+4+8+2+Hash.HASH_LENGTH;
|
||||
int suffixLen = 0;
|
||||
int payloadLen = writtenLen - prefixLen - suffixLen;
|
||||
Hash h = _context.sha().calculateHash(buffer, prefixLen, payloadLen);
|
||||
|
||||
byte len[] = DataHelper.toLong(2, payloadLen);
|
||||
buffer[1+4+8] = len[0];
|
||||
buffer[1+4+8+1] = len[1];
|
||||
for (int i = 0; i < Hash.HASH_LENGTH; i++)
|
||||
System.arraycopy(h.getData(), 0, buffer, 1+4+8+2, Hash.HASH_LENGTH);
|
||||
int off = 0;
|
||||
DataHelper.toLong(buffer, off, 1, getType());
|
||||
off += 1;
|
||||
DataHelper.toLong(buffer, off, 4, _uniqueId);
|
||||
off += 4;
|
||||
DataHelper.toLong(buffer, off, DataHelper.DATE_LENGTH, _expiration);
|
||||
off += DataHelper.DATE_LENGTH;
|
||||
DataHelper.toLong(buffer, off, 2, payloadLen);
|
||||
off += 2;
|
||||
System.arraycopy(h.getData(), 0, buffer, off, CHECKSUM_LENGTH);
|
||||
_context.sha().cache().release(cache);
|
||||
|
||||
long time = _context.clock().now() - start;
|
||||
if (time > 50)
|
||||
_context.statManager().addRateData("i2np.writeTime", time, time);
|
||||
//if (time > 50)
|
||||
// _context.statManager().addRateData("i2np.writeTime", time, time);
|
||||
|
||||
return writtenLen;
|
||||
} catch (I2NPMessageException ime) {
|
||||
_context.logManager().getLog(getClass()).error("Error writing", ime);
|
||||
throw new IllegalStateException("Unable to serialize the message: " + ime.getMessage());
|
||||
_context.logManager().getLog(getClass()).log(Log.CRIT, "Error writing", ime);
|
||||
throw new IllegalStateException("Unable to serialize the message (" + getClass().getName()
|
||||
+ "): " + ime.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@ -218,7 +231,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
* @return the index into the array after the last byte written
|
||||
*/
|
||||
protected abstract int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException;
|
||||
|
||||
/*
|
||||
protected int toByteArray(byte out[], byte[][] prefix, byte[][] suffix) throws I2NPMessageException {
|
||||
int curIndex = 0;
|
||||
for (int i = 0; i < prefix.length; i++) {
|
||||
@ -235,4 +248,5 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
|
||||
return curIndex;
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
@ -11,6 +11,7 @@ package net.i2p.data.i2np;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.Properties;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.Certificate;
|
||||
@ -19,8 +20,6 @@ import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.SessionKey;
|
||||
import net.i2p.data.SessionTag;
|
||||
import net.i2p.data.SigningPrivateKey;
|
||||
import net.i2p.data.SigningPublicKey;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
@ -28,95 +27,47 @@ import net.i2p.util.Log;
|
||||
* Defines the message sent to a router to request that it participate in a
|
||||
* tunnel using the included configuration settings.
|
||||
*
|
||||
* @author jrandom
|
||||
*/
|
||||
public class TunnelCreateMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(TunnelCreateMessage.class);
|
||||
private Log _log;
|
||||
public final static int MESSAGE_TYPE = 6;
|
||||
private int _participantType;
|
||||
private TunnelId _tunnelId;
|
||||
private Hash _nextRouter;
|
||||
private TunnelId _nextTunnelId;
|
||||
private long _tunnelDuration;
|
||||
private TunnelConfigurationSessionKey _configKey;
|
||||
private long _maxPeakMessagesPerMin;
|
||||
private long _maxAvgMessagesPerMin;
|
||||
private long _maxPeakBytesPerMin;
|
||||
private long _maxAvgBytesPerMin;
|
||||
private boolean _includeDummyTraffic;
|
||||
private boolean _reorderMessages;
|
||||
private TunnelSigningPublicKey _verificationPubKey;
|
||||
private TunnelSigningPrivateKey _verificationPrivKey;
|
||||
private TunnelSessionKey _tunnelKey;
|
||||
private Certificate _certificate;
|
||||
private int _durationSeconds;
|
||||
private SessionKey _layerKey;
|
||||
private SessionKey _ivKey;
|
||||
private Properties _options;
|
||||
private Hash _replyGateway;
|
||||
private TunnelId _replyTunnel;
|
||||
private SessionTag _replyTag;
|
||||
private SessionKey _replyKey;
|
||||
private TunnelId _replyTunnel;
|
||||
private Hash _replyPeer;
|
||||
private boolean _isGateway;
|
||||
private long _nonce;
|
||||
private Certificate _certificate;
|
||||
|
||||
private byte[] _optionsCache;
|
||||
private byte[] _certificateCache;
|
||||
|
||||
public static final int PARTICIPANT_TYPE_GATEWAY = 1;
|
||||
public static final int PARTICIPANT_TYPE_ENDPOINT = 2;
|
||||
public static final int PARTICIPANT_TYPE_OTHER = 3;
|
||||
|
||||
private final static long FLAG_DUMMY = 1 << 7;
|
||||
private final static long FLAG_REORDER = 1 << 6;
|
||||
public static final long MAX_NONCE_VALUE = ((1l << 32l) - 1l);
|
||||
|
||||
private static final Hash INVALID_HASH = new Hash(new byte[Hash.HASH_LENGTH]); // all 0s
|
||||
private static final TunnelId INVALID_TUNNEL = TunnelId.INVALID;
|
||||
|
||||
public TunnelCreateMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
setParticipantType(-1);
|
||||
setNextRouter(null);
|
||||
setNextTunnelId(null);
|
||||
setTunnelId(null);
|
||||
setTunnelDurationSeconds(-1);
|
||||
setConfigurationKey(null);
|
||||
setMaxPeakMessagesPerMin(-1);
|
||||
setMaxAvgMessagesPerMin(-1);
|
||||
setMaxPeakBytesPerMin(-1);
|
||||
setMaxAvgBytesPerMin(-1);
|
||||
setIncludeDummyTraffic(false);
|
||||
setReorderMessages(false);
|
||||
setVerificationPublicKey(null);
|
||||
setVerificationPrivateKey(null);
|
||||
setTunnelKey(null);
|
||||
setCertificate(null);
|
||||
setReplyTag(null);
|
||||
setReplyKey(null);
|
||||
setReplyTunnel(null);
|
||||
setReplyPeer(null);
|
||||
_log = context.logManager().getLog(TunnelCreateMessage.class);
|
||||
}
|
||||
|
||||
public void setParticipantType(int type) { _participantType = type; }
|
||||
public int getParticipantType() { return _participantType; }
|
||||
public void setNextRouter(Hash routerIdentityHash) { _nextRouter = routerIdentityHash; }
|
||||
public Hash getNextRouter() { return _nextRouter; }
|
||||
public void setNextTunnelId(TunnelId id) { _nextTunnelId = id; }
|
||||
public TunnelId getNextTunnelId() { return _nextTunnelId; }
|
||||
public void setTunnelId(TunnelId id) { _tunnelId = id; }
|
||||
public TunnelId getTunnelId() { return _tunnelId; }
|
||||
public void setTunnelDurationSeconds(long durationSeconds) { _tunnelDuration = durationSeconds; }
|
||||
public long getTunnelDurationSeconds() { return _tunnelDuration; }
|
||||
public void setConfigurationKey(TunnelConfigurationSessionKey key) { _configKey = key; }
|
||||
public TunnelConfigurationSessionKey getConfigurationKey() { return _configKey; }
|
||||
public void setMaxPeakMessagesPerMin(long msgs) { _maxPeakMessagesPerMin = msgs; }
|
||||
public long getMaxPeakMessagesPerMin() { return _maxPeakMessagesPerMin; }
|
||||
public void setMaxAvgMessagesPerMin(long msgs) { _maxAvgMessagesPerMin = msgs; }
|
||||
public long getMaxAvgMessagesPerMin() { return _maxAvgMessagesPerMin; }
|
||||
public void setMaxPeakBytesPerMin(long bytes) { _maxPeakBytesPerMin = bytes; }
|
||||
public long getMaxPeakBytesPerMin() { return _maxPeakBytesPerMin; }
|
||||
public void setMaxAvgBytesPerMin(long bytes) { _maxAvgBytesPerMin = bytes; }
|
||||
public long getMaxAvgBytesPerMin() { return _maxAvgBytesPerMin; }
|
||||
public void setIncludeDummyTraffic(boolean include) { _includeDummyTraffic = include; }
|
||||
public boolean getIncludeDummyTraffic() { return _includeDummyTraffic; }
|
||||
public void setReorderMessages(boolean reorder) { _reorderMessages = reorder; }
|
||||
public boolean getReorderMessages() { return _reorderMessages; }
|
||||
public void setVerificationPublicKey(TunnelSigningPublicKey key) { _verificationPubKey = key; }
|
||||
public TunnelSigningPublicKey getVerificationPublicKey() { return _verificationPubKey; }
|
||||
public void setVerificationPrivateKey(TunnelSigningPrivateKey key) { _verificationPrivKey = key; }
|
||||
public TunnelSigningPrivateKey getVerificationPrivateKey() { return _verificationPrivKey; }
|
||||
public void setTunnelKey(TunnelSessionKey key) { _tunnelKey = key; }
|
||||
public TunnelSessionKey getTunnelKey() { return _tunnelKey; }
|
||||
public void setDurationSeconds(int seconds) { _durationSeconds = seconds; }
|
||||
public int getDurationSeconds() { return _durationSeconds; }
|
||||
public void setLayerKey(SessionKey key) { _layerKey = key; }
|
||||
public SessionKey getLayerKey() { return _layerKey; }
|
||||
public void setIVKey(SessionKey key) { _ivKey = key; }
|
||||
public SessionKey getIVKey() { return _ivKey; }
|
||||
public void setCertificate(Certificate cert) { _certificate = cert; }
|
||||
public Certificate getCertificate() { return _certificate; }
|
||||
public void setReplyTag(SessionTag tag) { _replyTag = tag; }
|
||||
@ -125,257 +76,185 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
|
||||
public SessionKey getReplyKey() { return _replyKey; }
|
||||
public void setReplyTunnel(TunnelId id) { _replyTunnel = id; }
|
||||
public TunnelId getReplyTunnel() { return _replyTunnel; }
|
||||
public void setReplyPeer(Hash peer) { _replyPeer = peer; }
|
||||
public Hash getReplyPeer() { return _replyPeer; }
|
||||
public void setReplyGateway(Hash peer) { _replyGateway = peer; }
|
||||
public Hash getReplyGateway() { return _replyGateway; }
|
||||
public void setNonce(long nonce) { _nonce = nonce; }
|
||||
public long getNonce() { return _nonce; }
|
||||
public void setIsGateway(boolean isGateway) { _isGateway = isGateway; }
|
||||
public boolean getIsGateway() { return _isGateway; }
|
||||
public Properties getOptions() { return _options; }
|
||||
public void setOptions(Properties opts) { _options = opts; }
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
|
||||
_participantType = (int)DataHelper.fromLong(data, curIndex, 1);
|
||||
curIndex++;
|
||||
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
|
||||
byte peer[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
_nextRouter = new Hash(peer);
|
||||
|
||||
_nextTunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
|
||||
curIndex += 4;
|
||||
}
|
||||
|
||||
_tunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
|
||||
curIndex += 4;
|
||||
if (_tunnelId.getTunnelId() <= 0)
|
||||
throw new I2NPMessageException("wtf, tunnelId == " + _tunnelId);
|
||||
|
||||
_tunnelDuration = DataHelper.fromLong(data, curIndex, 4);
|
||||
curIndex += 4;
|
||||
|
||||
byte key[] = new byte[SessionKey.KEYSIZE_BYTES];
|
||||
System.arraycopy(data, curIndex, key, 0, SessionKey.KEYSIZE_BYTES);
|
||||
curIndex += SessionKey.KEYSIZE_BYTES;
|
||||
_configKey = new TunnelConfigurationSessionKey(new SessionKey(key));
|
||||
|
||||
_maxPeakMessagesPerMin = DataHelper.fromLong(data, curIndex, 4);
|
||||
curIndex += 4;
|
||||
_maxAvgMessagesPerMin = DataHelper.fromLong(data, curIndex, 4);
|
||||
curIndex += 4;
|
||||
_maxPeakBytesPerMin = DataHelper.fromLong(data, curIndex, 4);
|
||||
curIndex += 4;
|
||||
_maxAvgBytesPerMin = DataHelper.fromLong(data, curIndex, 4);
|
||||
curIndex += 4;
|
||||
|
||||
int flags = (int)DataHelper.fromLong(data, curIndex, 1);
|
||||
curIndex++;
|
||||
_includeDummyTraffic = flagsIncludeDummy(flags);
|
||||
_reorderMessages = flagsReorder(flags);
|
||||
|
||||
key = new byte[SigningPublicKey.KEYSIZE_BYTES];
|
||||
System.arraycopy(data, curIndex, key, 0, SigningPublicKey.KEYSIZE_BYTES);
|
||||
curIndex += SigningPublicKey.KEYSIZE_BYTES;
|
||||
_verificationPubKey = new TunnelSigningPublicKey(new SigningPublicKey(key));
|
||||
|
||||
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
|
||||
key = new byte[SigningPrivateKey.KEYSIZE_BYTES];
|
||||
System.arraycopy(data, curIndex, key, 0, SigningPrivateKey.KEYSIZE_BYTES);
|
||||
curIndex += SigningPrivateKey.KEYSIZE_BYTES;
|
||||
_verificationPrivKey = new TunnelSigningPrivateKey(new SigningPrivateKey(key));
|
||||
}
|
||||
|
||||
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
|
||||
key = new byte[SessionKey.KEYSIZE_BYTES];
|
||||
System.arraycopy(data, curIndex, key, 0, SessionKey.KEYSIZE_BYTES);
|
||||
curIndex += SessionKey.KEYSIZE_BYTES;
|
||||
_tunnelKey = new TunnelSessionKey(new SessionKey(key));
|
||||
}
|
||||
|
||||
int certType = (int) DataHelper.fromLong(data, curIndex, 1);
|
||||
curIndex++;
|
||||
int certLength = (int) DataHelper.fromLong(data, curIndex, 2);
|
||||
curIndex += 2;
|
||||
if (certLength <= 0) {
|
||||
_certificate = new Certificate(certType, null);
|
||||
if (DataHelper.eq(INVALID_HASH.getData(), 0, data, offset, Hash.HASH_LENGTH)) {
|
||||
_nextRouter = null;
|
||||
} else {
|
||||
if (certLength > 16*1024) throw new I2NPMessageException("cert size " + certLength);
|
||||
byte certPayload[] = new byte[certLength];
|
||||
System.arraycopy(data, curIndex, certPayload, 0, certLength);
|
||||
curIndex += certLength;
|
||||
_certificate = new Certificate(certType, certPayload);
|
||||
_nextRouter = new Hash(new byte[Hash.HASH_LENGTH]);
|
||||
System.arraycopy(data, offset, _nextRouter.getData(), 0, Hash.HASH_LENGTH);
|
||||
}
|
||||
offset += Hash.HASH_LENGTH;
|
||||
|
||||
long id = DataHelper.fromLong(data, offset, 4);
|
||||
if (id > 0)
|
||||
_nextTunnelId = new TunnelId(id);
|
||||
offset += 4;
|
||||
|
||||
_durationSeconds = (int)DataHelper.fromLong(data, offset, 2);
|
||||
offset += 2;
|
||||
|
||||
_layerKey = new SessionKey(new byte[SessionKey.KEYSIZE_BYTES]);
|
||||
System.arraycopy(data, offset, _layerKey.getData(), 0, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
_ivKey = new SessionKey(new byte[SessionKey.KEYSIZE_BYTES]);
|
||||
System.arraycopy(data, offset, _ivKey.getData(), 0, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
try {
|
||||
Properties opts = new Properties();
|
||||
_options = opts;
|
||||
offset = DataHelper.fromProperties(data, offset, opts);
|
||||
} catch (DataFormatException dfe) {
|
||||
throw new I2NPMessageException("Error reading the options", dfe);
|
||||
}
|
||||
|
||||
byte tag[] = new byte[SessionTag.BYTE_LENGTH];
|
||||
System.arraycopy(data, curIndex, tag, 0, SessionTag.BYTE_LENGTH);
|
||||
curIndex += SessionTag.BYTE_LENGTH;
|
||||
_replyTag = new SessionTag(tag);
|
||||
_replyGateway = new Hash(new byte[Hash.HASH_LENGTH]);
|
||||
System.arraycopy(data, offset, _replyGateway.getData(), 0, Hash.HASH_LENGTH);
|
||||
offset += Hash.HASH_LENGTH;
|
||||
|
||||
key = new byte[SessionKey.KEYSIZE_BYTES];
|
||||
System.arraycopy(data, curIndex, key, 0, SessionKey.KEYSIZE_BYTES);
|
||||
curIndex += SessionKey.KEYSIZE_BYTES;
|
||||
_replyKey = new SessionKey(key);
|
||||
_replyTunnel = new TunnelId(DataHelper.fromLong(data, offset, 4));
|
||||
offset += 4;
|
||||
|
||||
_replyTunnel = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
|
||||
curIndex += 4;
|
||||
_replyTag = new SessionTag(new byte[SessionTag.BYTE_LENGTH]);
|
||||
System.arraycopy(data, offset, _replyTag.getData(), 0, SessionTag.BYTE_LENGTH);
|
||||
offset += SessionTag.BYTE_LENGTH;
|
||||
|
||||
byte peer[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
_replyPeer = new Hash(peer);
|
||||
_replyKey = new SessionKey(new byte[SessionKey.KEYSIZE_BYTES]);
|
||||
System.arraycopy(data, offset, _replyKey.getData(), 0, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
_nonce = DataHelper.fromLong(data, offset, 4);
|
||||
offset += 4;
|
||||
|
||||
try {
|
||||
Certificate cert = new Certificate();
|
||||
_certificate = cert;
|
||||
offset += cert.readBytes(data, offset);
|
||||
} catch (DataFormatException dfe) {
|
||||
throw new I2NPMessageException("Error reading the certificate", dfe);
|
||||
}
|
||||
|
||||
Boolean b = DataHelper.fromBoolean(data, offset);
|
||||
if (b == null)
|
||||
throw new I2NPMessageException("isGateway == unknown?!");
|
||||
_isGateway = b.booleanValue();
|
||||
offset += DataHelper.BOOLEAN_LENGTH;
|
||||
}
|
||||
|
||||
|
||||
/** calculate the message body's length (not including the header and footer */
|
||||
protected int calculateWrittenLength() {
|
||||
int length = 0;
|
||||
length += 1; // participantType
|
||||
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
|
||||
length += Hash.HASH_LENGTH;
|
||||
length += 4; // nextTunnelId
|
||||
}
|
||||
length += 4; // tunnelId
|
||||
length += 4; // duration;
|
||||
length += SessionKey.KEYSIZE_BYTES;
|
||||
length += 4*4; // max limits
|
||||
length += 1; // flags
|
||||
length += SigningPublicKey.KEYSIZE_BYTES;
|
||||
if (_participantType == PARTICIPANT_TYPE_GATEWAY)
|
||||
length += SigningPrivateKey.KEYSIZE_BYTES;
|
||||
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT)
|
||||
|| (_participantType == PARTICIPANT_TYPE_GATEWAY) )
|
||||
length += SessionKey.KEYSIZE_BYTES;
|
||||
_certificateCache = _certificate.toByteArray();
|
||||
length += _certificateCache.length;
|
||||
length += SessionTag.BYTE_LENGTH;
|
||||
length += SessionKey.KEYSIZE_BYTES;
|
||||
length += Hash.HASH_LENGTH; // nextRouter
|
||||
length += 4; // nextTunnel
|
||||
length += 2; // duration
|
||||
length += SessionKey.KEYSIZE_BYTES; // layerKey
|
||||
length += SessionKey.KEYSIZE_BYTES; // ivKey
|
||||
|
||||
if (_optionsCache == null)
|
||||
_optionsCache = DataHelper.toProperties(_options);
|
||||
length += _optionsCache.length;
|
||||
|
||||
length += Hash.HASH_LENGTH; // replyGateway
|
||||
length += 4; // replyTunnel
|
||||
length += Hash.HASH_LENGTH; // replyPeer
|
||||
length += SessionTag.BYTE_LENGTH; // replyTag
|
||||
length += SessionKey.KEYSIZE_BYTES; // replyKey
|
||||
length += 4; // nonce
|
||||
if (_certificateCache == null)
|
||||
_certificateCache = _certificate.toByteArray();
|
||||
length += _certificateCache.length;
|
||||
length += DataHelper.BOOLEAN_LENGTH;
|
||||
return length;
|
||||
}
|
||||
|
||||
/** write the message body to the output array, starting at the given index */
|
||||
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
|
||||
byte type[] = DataHelper.toLong(1, _participantType);
|
||||
out[curIndex++] = type[0];
|
||||
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
|
||||
System.arraycopy(_nextRouter.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
byte id[] = DataHelper.toLong(4, _nextTunnelId.getTunnelId());
|
||||
System.arraycopy(id, 0, out, curIndex, 4);
|
||||
curIndex += 4;
|
||||
}
|
||||
byte id[] = DataHelper.toLong(4, _tunnelId.getTunnelId());
|
||||
System.arraycopy(id, 0, out, curIndex, 4);
|
||||
curIndex += 4;
|
||||
byte duration[] = DataHelper.toLong(4, _tunnelDuration);
|
||||
System.arraycopy(duration, 0, out, curIndex, 4);
|
||||
curIndex += 4;
|
||||
System.arraycopy(_configKey.getKey().getData(), 0, out, curIndex, SessionKey.KEYSIZE_BYTES);
|
||||
curIndex += SessionKey.KEYSIZE_BYTES;
|
||||
protected int writeMessageBody(byte data[], int offset) throws I2NPMessageException {
|
||||
if (_nextRouter == null)
|
||||
System.arraycopy(INVALID_HASH.getData(), 0, data, offset, Hash.HASH_LENGTH);
|
||||
else
|
||||
System.arraycopy(_nextRouter.getData(), 0, data, offset, Hash.HASH_LENGTH);
|
||||
offset += Hash.HASH_LENGTH;
|
||||
|
||||
byte val[] = DataHelper.toLong(4, _maxPeakMessagesPerMin);
|
||||
System.arraycopy(val, 0, out, curIndex, 4);
|
||||
curIndex += 4;
|
||||
val = DataHelper.toLong(4, _maxAvgMessagesPerMin);
|
||||
System.arraycopy(val, 0, out, curIndex, 4);
|
||||
curIndex += 4;
|
||||
val = DataHelper.toLong(4, _maxPeakBytesPerMin);
|
||||
System.arraycopy(val, 0, out, curIndex, 4);
|
||||
curIndex += 4;
|
||||
val = DataHelper.toLong(4, _maxAvgBytesPerMin);
|
||||
System.arraycopy(val, 0, out, curIndex, 4);
|
||||
curIndex += 4;
|
||||
|
||||
long flags = getFlags();
|
||||
byte flag[] = DataHelper.toLong(1, flags);
|
||||
out[curIndex++] = flag[0];
|
||||
|
||||
System.arraycopy(_verificationPubKey.getKey().getData(), 0, out, curIndex, SigningPublicKey.KEYSIZE_BYTES);
|
||||
curIndex += SigningPublicKey.KEYSIZE_BYTES;
|
||||
if (_nextTunnelId == null)
|
||||
DataHelper.toLong(data, offset, 4, 0);
|
||||
else
|
||||
DataHelper.toLong(data, offset, 4, _nextTunnelId.getTunnelId());
|
||||
offset += 4;
|
||||
|
||||
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
|
||||
System.arraycopy(_verificationPrivKey.getKey().getData(), 0, out, curIndex, SigningPrivateKey.KEYSIZE_BYTES);
|
||||
curIndex += SigningPrivateKey.KEYSIZE_BYTES;
|
||||
}
|
||||
DataHelper.toLong(data, offset, 2, _durationSeconds);
|
||||
offset += 2;
|
||||
|
||||
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
|
||||
System.arraycopy(_tunnelKey.getKey().getData(), 0, out, curIndex, SessionKey.KEYSIZE_BYTES);
|
||||
curIndex += SessionKey.KEYSIZE_BYTES;
|
||||
}
|
||||
System.arraycopy(_certificateCache, 0, out, curIndex, _certificateCache.length);
|
||||
curIndex += _certificateCache.length;
|
||||
System.arraycopy(_replyTag.getData(), 0, out, curIndex, SessionTag.BYTE_LENGTH);
|
||||
curIndex += SessionTag.BYTE_LENGTH;
|
||||
System.arraycopy(_replyKey.getData(), 0, out, curIndex, SessionKey.KEYSIZE_BYTES);
|
||||
curIndex += SessionKey.KEYSIZE_BYTES;
|
||||
id = DataHelper.toLong(4, _replyTunnel.getTunnelId());
|
||||
System.arraycopy(id, 0, out, curIndex, 4);
|
||||
curIndex += 4;
|
||||
System.arraycopy(_replyPeer.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
return curIndex;
|
||||
System.arraycopy(_layerKey.getData(), 0, data, offset, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
System.arraycopy(_ivKey.getData(), 0, data, offset, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
if (_optionsCache == null)
|
||||
_optionsCache = DataHelper.toProperties(_options);
|
||||
System.arraycopy(_optionsCache, 0, data, offset, _optionsCache.length);
|
||||
offset += _optionsCache.length;
|
||||
|
||||
System.arraycopy(_replyGateway.getData(), 0, data, offset, Hash.HASH_LENGTH);
|
||||
offset += Hash.HASH_LENGTH;
|
||||
|
||||
DataHelper.toLong(data, offset, 4, _replyTunnel.getTunnelId());
|
||||
offset += 4;
|
||||
|
||||
System.arraycopy(_replyTag.getData(), 0, data, offset, SessionTag.BYTE_LENGTH);
|
||||
offset += SessionTag.BYTE_LENGTH;
|
||||
|
||||
System.arraycopy(_replyKey.getData(), 0, data, offset, SessionKey.KEYSIZE_BYTES);
|
||||
offset += SessionKey.KEYSIZE_BYTES;
|
||||
|
||||
DataHelper.toLong(data, offset, 4, _nonce);
|
||||
offset += 4;
|
||||
|
||||
if (_certificateCache == null)
|
||||
_certificateCache = _certificate.toByteArray();
|
||||
System.arraycopy(_certificateCache, 0, data, offset, _certificateCache.length);
|
||||
offset += _certificateCache.length;
|
||||
|
||||
DataHelper.toBoolean(data, offset, _isGateway);
|
||||
offset += DataHelper.BOOLEAN_LENGTH;
|
||||
|
||||
return offset;
|
||||
}
|
||||
|
||||
private boolean flagsIncludeDummy(long flags) {
|
||||
return (0 != (flags & FLAG_DUMMY));
|
||||
}
|
||||
private boolean flagsReorder(long flags) {
|
||||
return (0 != (flags & FLAG_REORDER));
|
||||
}
|
||||
|
||||
private long getFlags() {
|
||||
long val = 0L;
|
||||
if (getIncludeDummyTraffic())
|
||||
val = val | FLAG_DUMMY;
|
||||
if (getReorderMessages())
|
||||
val = val | FLAG_REORDER;
|
||||
return val;
|
||||
public byte[] toByteArray() {
|
||||
byte rv[] = super.toByteArray();
|
||||
if (rv == null)
|
||||
throw new RuntimeException("unable to toByteArray(): " + toString());
|
||||
return rv;
|
||||
}
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
|
||||
public int hashCode() {
|
||||
return (int)(DataHelper.hashCode(getCertificate()) +
|
||||
DataHelper.hashCode(getConfigurationKey()) +
|
||||
DataHelper.hashCode(getNextRouter()) +
|
||||
DataHelper.hashCode(getNextTunnelId()) +
|
||||
DataHelper.hashCode(getReplyPeer()) +
|
||||
DataHelper.hashCode(getReplyTunnel()) +
|
||||
DataHelper.hashCode(getTunnelId()) +
|
||||
DataHelper.hashCode(getTunnelKey()) +
|
||||
DataHelper.hashCode(getVerificationPrivateKey()) +
|
||||
DataHelper.hashCode(getVerificationPublicKey()) +
|
||||
(getIncludeDummyTraffic() ? 1 : 0) +
|
||||
getMaxAvgBytesPerMin() +
|
||||
getMaxAvgMessagesPerMin() +
|
||||
getMaxPeakBytesPerMin() +
|
||||
getMaxPeakMessagesPerMin() +
|
||||
getParticipantType() +
|
||||
(getReorderMessages() ? 1 : 0) +
|
||||
getTunnelDurationSeconds());
|
||||
return DataHelper.hashCode(getNextRouter()) +
|
||||
DataHelper.hashCode(getNextTunnelId()) +
|
||||
DataHelper.hashCode(getReplyGateway()) +
|
||||
DataHelper.hashCode(getReplyTunnel());
|
||||
}
|
||||
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof TunnelCreateMessage) ) {
|
||||
TunnelCreateMessage msg = (TunnelCreateMessage)object;
|
||||
return DataHelper.eq(getCertificate(), msg.getCertificate()) &&
|
||||
DataHelper.eq(getConfigurationKey(), msg.getConfigurationKey()) &&
|
||||
DataHelper.eq(getNextRouter(), msg.getNextRouter()) &&
|
||||
DataHelper.eq(getNextTunnelId(), msg.getNextTunnelId()) &&
|
||||
DataHelper.eq(getReplyTag(), msg.getReplyTag()) &&
|
||||
DataHelper.eq(getReplyKey(), msg.getReplyKey()) &&
|
||||
DataHelper.eq(getReplyTunnel(), msg.getReplyTunnel()) &&
|
||||
DataHelper.eq(getReplyPeer(), msg.getReplyPeer()) &&
|
||||
DataHelper.eq(getTunnelId(), msg.getTunnelId()) &&
|
||||
DataHelper.eq(getTunnelKey(), msg.getTunnelKey()) &&
|
||||
DataHelper.eq(getVerificationPrivateKey(), msg.getVerificationPrivateKey()) &&
|
||||
DataHelper.eq(getVerificationPublicKey(), msg.getVerificationPublicKey()) &&
|
||||
(getIncludeDummyTraffic() == msg.getIncludeDummyTraffic()) &&
|
||||
(getMaxAvgBytesPerMin() == msg.getMaxAvgBytesPerMin()) &&
|
||||
(getMaxAvgMessagesPerMin() == msg.getMaxAvgMessagesPerMin()) &&
|
||||
(getMaxPeakBytesPerMin() == msg.getMaxPeakBytesPerMin()) &&
|
||||
(getMaxPeakMessagesPerMin() == msg.getMaxPeakMessagesPerMin()) &&
|
||||
(getParticipantType() == msg.getParticipantType()) &&
|
||||
(getReorderMessages() == msg.getReorderMessages()) &&
|
||||
(getTunnelDurationSeconds() == msg.getTunnelDurationSeconds());
|
||||
return DataHelper.eq(getNextRouter(), msg.getNextRouter()) &&
|
||||
DataHelper.eq(getNextTunnelId(), msg.getNextTunnelId()) &&
|
||||
DataHelper.eq(getReplyGateway(), msg.getReplyGateway()) &&
|
||||
DataHelper.eq(getReplyTunnel(), msg.getReplyTunnel());
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
@ -384,28 +263,13 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[TunnelCreateMessage: ");
|
||||
buf.append("\n\tParticipant Type: ").append(getParticipantType());
|
||||
buf.append("\n\tCertificate: ").append(getCertificate());
|
||||
buf.append("\n\tConfiguration Key: ").append(getConfigurationKey());
|
||||
buf.append("\n\tNext Router: ").append(getNextRouter());
|
||||
buf.append("\n\tNext Tunnel: ").append(getNextTunnelId());
|
||||
buf.append("\n\tReply Tag: ").append(getReplyTag());
|
||||
buf.append("\n\tReply Key: ").append(getReplyKey());
|
||||
buf.append("\n\tReply Tunnel: ").append(getReplyTunnel());
|
||||
buf.append("\n\tReply Peer: ").append(getReplyPeer());
|
||||
buf.append("\n\tTunnel ID: ").append(getTunnelId());
|
||||
buf.append("\n\tTunnel Key: ").append(getTunnelKey());
|
||||
buf.append("\n\tVerification Private Key: ").append(getVerificationPrivateKey());
|
||||
buf.append("\n\tVerification Public Key: ").append(getVerificationPublicKey());
|
||||
buf.append("\n\tInclude Dummy Traffic: ").append(getIncludeDummyTraffic());
|
||||
buf.append("\n\tMax Avg Bytes / Minute: ").append(getMaxAvgBytesPerMin());
|
||||
buf.append("\n\tMax Peak Bytes / Minute: ").append(getMaxPeakBytesPerMin());
|
||||
buf.append("\n\tMax Avg Messages / Minute: ").append(getMaxAvgMessagesPerMin());
|
||||
buf.append("\n\tMax Peak Messages / Minute: ").append(getMaxPeakMessagesPerMin());
|
||||
buf.append("\n\tReorder Messages: ").append(getReorderMessages());
|
||||
buf.append("\n\tTunnel Duration (seconds): ").append(getTunnelDurationSeconds());
|
||||
buf.append("\n\tReply Peer: ").append(getReplyGateway());
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
}
|
||||
|
@ -28,26 +28,22 @@ import net.i2p.util.Log;
|
||||
public class TunnelCreateStatusMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(TunnelCreateStatusMessage.class);
|
||||
public final static int MESSAGE_TYPE = 7;
|
||||
private TunnelId _tunnelId;
|
||||
private TunnelId _receiveTunnelId;
|
||||
private int _status;
|
||||
private Hash _from;
|
||||
private long _nonce;
|
||||
|
||||
public final static int STATUS_SUCCESS = 0;
|
||||
public final static int STATUS_FAILED_DUPLICATE_ID = 1;
|
||||
public final static int STATUS_FAILED_OVERLOADED = 2;
|
||||
public final static int STATUS_FAILED_CERTIFICATE = 3;
|
||||
public final static int STATUS_FAILED_DELETED = 100;
|
||||
|
||||
public TunnelCreateStatusMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
setTunnelId(null);
|
||||
setReceiveTunnelId(null);
|
||||
setStatus(-1);
|
||||
setFromHash(null);
|
||||
setNonce(-1);
|
||||
}
|
||||
|
||||
public TunnelId getTunnelId() { return _tunnelId; }
|
||||
public void setTunnelId(TunnelId id) {
|
||||
_tunnelId = id;
|
||||
public TunnelId getReceiveTunnelId() { return _receiveTunnelId; }
|
||||
public void setReceiveTunnelId(TunnelId id) {
|
||||
_receiveTunnelId = id;
|
||||
if ( (id != null) && (id.getTunnelId() <= 0) )
|
||||
throw new IllegalArgumentException("wtf, tunnelId " + id);
|
||||
}
|
||||
@ -55,63 +51,57 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl {
|
||||
public int getStatus() { return _status; }
|
||||
public void setStatus(int status) { _status = status; }
|
||||
|
||||
/**
|
||||
* Contains the SHA256 Hash of the RouterIdentity sending the message
|
||||
*/
|
||||
public Hash getFromHash() { return _from; }
|
||||
public void setFromHash(Hash from) { _from = from; }
|
||||
public long getNonce() { return _nonce; }
|
||||
public void setNonce(long nonce) { _nonce = nonce; }
|
||||
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
|
||||
_tunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
|
||||
_receiveTunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
|
||||
curIndex += 4;
|
||||
|
||||
if (_tunnelId.getTunnelId() <= 0)
|
||||
throw new I2NPMessageException("wtf, negative tunnelId? " + _tunnelId);
|
||||
if (_receiveTunnelId.getTunnelId() <= 0)
|
||||
throw new I2NPMessageException("wtf, negative tunnelId? " + _receiveTunnelId);
|
||||
|
||||
_status = (int)DataHelper.fromLong(data, curIndex, 1);
|
||||
curIndex++;
|
||||
byte peer[] = new byte[Hash.HASH_LENGTH];
|
||||
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
_from = new Hash(peer);
|
||||
|
||||
_nonce = DataHelper.fromLong(data, curIndex, 4);
|
||||
}
|
||||
|
||||
|
||||
/** calculate the message body's length (not including the header and footer */
|
||||
protected int calculateWrittenLength() {
|
||||
return 4 + 1 + Hash.HASH_LENGTH; // id + status + from
|
||||
return 4 + 1 + 4; // id + status + nonce
|
||||
}
|
||||
/** write the message body to the output array, starting at the given index */
|
||||
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
|
||||
if ( (_tunnelId == null) || (_from == null) ) throw new I2NPMessageException("Not enough data to write out");
|
||||
if (_tunnelId.getTunnelId() < 0) throw new I2NPMessageException("Negative tunnelId!? " + _tunnelId);
|
||||
if ( (_receiveTunnelId == null) || (_nonce <= 0) ) throw new I2NPMessageException("Not enough data to write out");
|
||||
if (_receiveTunnelId.getTunnelId() <= 0) throw new I2NPMessageException("Invalid tunnelId!? " + _receiveTunnelId);
|
||||
|
||||
byte id[] = DataHelper.toLong(4, _tunnelId.getTunnelId());
|
||||
System.arraycopy(id, 0, out, curIndex, 4);
|
||||
DataHelper.toLong(out, curIndex, 4, _receiveTunnelId.getTunnelId());
|
||||
curIndex += 4;
|
||||
DataHelper.toLong(out, curIndex, 1, _status);
|
||||
curIndex++;
|
||||
DataHelper.toLong(out, curIndex, 4, _nonce);
|
||||
curIndex += 4;
|
||||
byte status[] = DataHelper.toLong(1, _status);
|
||||
out[curIndex++] = status[0];
|
||||
System.arraycopy(_from.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
|
||||
curIndex += Hash.HASH_LENGTH;
|
||||
return curIndex;
|
||||
}
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getTunnelId()) +
|
||||
return DataHelper.hashCode(getReceiveTunnelId()) +
|
||||
getStatus() +
|
||||
DataHelper.hashCode(getFromHash());
|
||||
(int)getNonce();
|
||||
}
|
||||
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof TunnelCreateStatusMessage) ) {
|
||||
TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)object;
|
||||
return DataHelper.eq(getTunnelId(),msg.getTunnelId()) &&
|
||||
DataHelper.eq(getFromHash(),msg.getFromHash()) &&
|
||||
return DataHelper.eq(getReceiveTunnelId(),msg.getReceiveTunnelId()) &&
|
||||
DataHelper.eq(getNonce(),msg.getNonce()) &&
|
||||
(getStatus() == msg.getStatus());
|
||||
} else {
|
||||
return false;
|
||||
@ -121,9 +111,9 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl {
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[TunnelCreateStatusMessage: ");
|
||||
buf.append("\n\tTunnel ID: ").append(getTunnelId());
|
||||
buf.append("\n\tTunnel ID: ").append(getReceiveTunnelId());
|
||||
buf.append("\n\tStatus: ").append(getStatus());
|
||||
buf.append("\n\tFrom: ").append(getFromHash());
|
||||
buf.append("\n\tNonce: ").append(getNonce());
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
|
@ -10,6 +10,8 @@ package net.i2p.router;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.Collections;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
@ -38,6 +40,9 @@ public abstract class ClientManagerFacade implements Service {
|
||||
* @param onFailedJob Job to run after the timeout passes without receiving authorization
|
||||
*/
|
||||
public abstract void requestLeaseSet(Destination dest, LeaseSet set, long timeout, Job onCreateJob, Job onFailedJob);
|
||||
|
||||
public abstract void requestLeaseSet(Hash dest, LeaseSet set);
|
||||
|
||||
/**
|
||||
* Instruct the client (or all clients) that they are under attack. This call
|
||||
* does not block.
|
||||
@ -67,6 +72,14 @@ public abstract class ClientManagerFacade implements Service {
|
||||
|
||||
public boolean verifyClientLiveliness() { return true; }
|
||||
|
||||
|
||||
/**
|
||||
* Return the list of locally connected clients
|
||||
*
|
||||
* @return set of Destination objects
|
||||
*/
|
||||
public Set listClients() { return Collections.EMPTY_SET; }
|
||||
|
||||
/**
|
||||
* Return the client's current config, or null if not connected
|
||||
*
|
||||
@ -96,4 +109,7 @@ class DummyClientManagerFacade extends ClientManagerFacade {
|
||||
public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) {}
|
||||
|
||||
public SessionConfig getClientSessionConfig(Destination _dest) { return null; }
|
||||
|
||||
public void requestLeaseSet(Hash dest, LeaseSet set) {}
|
||||
|
||||
}
|
||||
|
@ -12,7 +12,6 @@ import java.util.Properties;
|
||||
|
||||
import net.i2p.client.I2PClient;
|
||||
|
||||
import net.i2p.router.message.OutboundClientMessageJob;
|
||||
import net.i2p.router.message.OutboundClientMessageOneShotJob;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
@ -59,10 +58,7 @@ public class ClientMessagePool {
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Adding message for remote delivery");
|
||||
if (isGuaranteed(msg))
|
||||
_context.jobQueue().addJob(new OutboundClientMessageJob(_context, msg));
|
||||
else
|
||||
_context.jobQueue().addJob(new OutboundClientMessageOneShotJob(_context, msg));
|
||||
_context.jobQueue().addJob(new OutboundClientMessageOneShotJob(_context, msg));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -16,160 +16,46 @@ import java.util.Properties;
|
||||
*
|
||||
*/
|
||||
public class ClientTunnelSettings {
|
||||
private int _numInbound;
|
||||
private int _numOutbound;
|
||||
private int _depthInbound;
|
||||
private int _depthOutbound;
|
||||
private long _msgsPerMinuteAvgInbound;
|
||||
private long _bytesPerMinuteAvgInbound;
|
||||
private long _msgsPerMinutePeakInbound;
|
||||
private long _bytesPerMinutePeakInbound;
|
||||
private boolean _includeDummyInbound;
|
||||
private boolean _includeDummyOutbound;
|
||||
private boolean _reorderInbound;
|
||||
private boolean _reorderOutbound;
|
||||
private long _inboundDuration;
|
||||
private boolean _enforceStrictMinimumLength;
|
||||
|
||||
public final static String PROP_NUM_INBOUND = "tunnels.numInbound";
|
||||
public final static String PROP_NUM_OUTBOUND = "tunnels.numOutbound";
|
||||
public final static String PROP_DEPTH_INBOUND = "tunnels.depthInbound";
|
||||
public final static String PROP_DEPTH_OUTBOUND = "tunnels.depthOutbound";
|
||||
public final static String PROP_MSGS_AVG = "tunnels.messagesPerMinuteAverage";
|
||||
public final static String PROP_MSGS_PEAK = "tunnels.messagesPerMinutePeak";
|
||||
public final static String PROP_BYTES_AVG = "tunnels.bytesPerMinuteAverage";
|
||||
public final static String PROP_BYTES_PEAK = "tunnels.bytesPerMinutePeak";
|
||||
public final static String PROP_DUMMY_INBOUND = "tunnels.includeDummyTrafficInbound";
|
||||
public final static String PROP_DUMMY_OUTBOUND = "tunnels.includeDummyTrafficOutbound";
|
||||
public final static String PROP_REORDER_INBOUND = "tunnels.reorderInboundMessages";
|
||||
public final static String PROP_REORDER_OUTBOUND = "tunnels.reoderOutboundMessages";
|
||||
public final static String PROP_DURATION = "tunnels.tunnelDuration";
|
||||
/**
|
||||
* if tunnels.strictMinimumLength=true then never accept a tunnel shorter than the client's
|
||||
* request, otherwise we'll try to meet that minimum, but if we don't have any that length,
|
||||
* we'll accept the longest we do have.
|
||||
*
|
||||
*/
|
||||
public final static String PROP_STRICT_MINIMUM_LENGTH = "tunnels.enforceStrictMinimumLength";
|
||||
|
||||
public final static int DEFAULT_NUM_INBOUND = 2;
|
||||
public final static int DEFAULT_NUM_OUTBOUND = 1;
|
||||
public final static int DEFAULT_DEPTH_INBOUND = 2;
|
||||
public final static int DEFAULT_DEPTH_OUTBOUND = 2;
|
||||
public final static long DEFAULT_MSGS_AVG = 0;
|
||||
public final static long DEFAULT_MSGS_PEAK = 0;
|
||||
public final static long DEFAULT_BYTES_AVG = 0;
|
||||
public final static long DEFAULT_BYTES_PEAK = 0;
|
||||
public final static boolean DEFAULT_DUMMY_INBOUND = false;
|
||||
public final static boolean DEFAULT_DUMMY_OUTBOUND = false;
|
||||
public final static boolean DEFAULT_REORDER_INBOUND = false;
|
||||
public final static boolean DEFAULT_REORDER_OUTBOUND = false;
|
||||
public final static long DEFAULT_DURATION = 10*60*1000;
|
||||
public final static boolean DEFAULT_STRICT_MINIMUM_LENGTH = true;
|
||||
private TunnelPoolSettings _inboundSettings;
|
||||
private TunnelPoolSettings _outboundSettings;
|
||||
|
||||
public ClientTunnelSettings() {
|
||||
_numInbound = DEFAULT_NUM_INBOUND;
|
||||
_numOutbound = DEFAULT_NUM_OUTBOUND;
|
||||
_depthInbound = DEFAULT_DEPTH_INBOUND;
|
||||
_depthOutbound = DEFAULT_DEPTH_OUTBOUND;
|
||||
_msgsPerMinuteAvgInbound = DEFAULT_MSGS_AVG;
|
||||
_bytesPerMinuteAvgInbound = DEFAULT_BYTES_AVG;
|
||||
_msgsPerMinutePeakInbound = DEFAULT_MSGS_PEAK;
|
||||
_bytesPerMinutePeakInbound = DEFAULT_BYTES_PEAK;
|
||||
_includeDummyInbound = DEFAULT_DUMMY_INBOUND;
|
||||
_includeDummyOutbound = DEFAULT_DUMMY_OUTBOUND;
|
||||
_reorderInbound = DEFAULT_REORDER_INBOUND;
|
||||
_reorderOutbound = DEFAULT_REORDER_OUTBOUND;
|
||||
_inboundDuration = DEFAULT_DURATION;
|
||||
_enforceStrictMinimumLength = DEFAULT_STRICT_MINIMUM_LENGTH;
|
||||
_inboundSettings = new TunnelPoolSettings();
|
||||
_inboundSettings.setIsInbound(true);
|
||||
_inboundSettings.setIsExploratory(false);
|
||||
_outboundSettings = new TunnelPoolSettings();
|
||||
_outboundSettings.setIsInbound(false);
|
||||
_outboundSettings.setIsExploratory(false);
|
||||
}
|
||||
|
||||
public int getNumInboundTunnels() { return _numInbound; }
|
||||
public int getNumOutboundTunnels() { return _numOutbound; }
|
||||
public int getDepthInbound() { return _depthInbound; }
|
||||
public int getDepthOutbound() { return _depthOutbound; }
|
||||
public long getMessagesPerMinuteInboundAverage() { return _msgsPerMinuteAvgInbound; }
|
||||
public long getMessagesPerMinuteInboundPeak() { return _msgsPerMinutePeakInbound; }
|
||||
public long getBytesPerMinuteInboundAverage() { return _bytesPerMinuteAvgInbound; }
|
||||
public long getBytesPerMinuteInboundPeak() { return _bytesPerMinutePeakInbound; }
|
||||
public boolean getIncludeDummyInbound() { return _includeDummyInbound; }
|
||||
public boolean getIncludeDummyOutbound() { return _includeDummyOutbound; }
|
||||
public boolean getReorderInbound() { return _reorderInbound; }
|
||||
public boolean getReorderOutbound() { return _reorderOutbound; }
|
||||
public long getInboundDuration() { return _inboundDuration; }
|
||||
public boolean getEnforceStrictMinimumLength() { return _enforceStrictMinimumLength; }
|
||||
|
||||
public void setNumInboundTunnels(int num) { _numInbound = num; }
|
||||
public void setNumOutboundTunnels(int num) { _numOutbound = num; }
|
||||
public void setEnforceStrictMinimumLength(boolean enforce) { _enforceStrictMinimumLength = enforce; }
|
||||
public TunnelPoolSettings getInboundSettings() { return _inboundSettings; }
|
||||
public void setInboundSettings(TunnelPoolSettings settings) { _inboundSettings = settings; }
|
||||
public TunnelPoolSettings getOutboundSettings() { return _outboundSettings; }
|
||||
public void setOutboundSettings(TunnelPoolSettings settings) { _outboundSettings = settings; }
|
||||
|
||||
public void readFromProperties(Properties props) {
|
||||
_numInbound = getInt(props.getProperty(PROP_NUM_INBOUND), DEFAULT_NUM_INBOUND);
|
||||
_numOutbound = getInt(props.getProperty(PROP_NUM_OUTBOUND), DEFAULT_NUM_OUTBOUND);
|
||||
_depthInbound = getInt(props.getProperty(PROP_DEPTH_INBOUND), DEFAULT_DEPTH_INBOUND);
|
||||
_depthOutbound = getInt(props.getProperty(PROP_DEPTH_OUTBOUND), DEFAULT_DEPTH_OUTBOUND);
|
||||
_msgsPerMinuteAvgInbound = getLong(props.getProperty(PROP_MSGS_AVG), DEFAULT_MSGS_AVG);
|
||||
_bytesPerMinuteAvgInbound = getLong(props.getProperty(PROP_MSGS_PEAK), DEFAULT_BYTES_AVG);
|
||||
_msgsPerMinutePeakInbound = getLong(props.getProperty(PROP_BYTES_AVG), DEFAULT_MSGS_PEAK);
|
||||
_bytesPerMinutePeakInbound = getLong(props.getProperty(PROP_BYTES_PEAK), DEFAULT_BYTES_PEAK);
|
||||
_includeDummyInbound = getBoolean(props.getProperty(PROP_DUMMY_INBOUND), DEFAULT_DUMMY_INBOUND);
|
||||
_includeDummyOutbound = getBoolean(props.getProperty(PROP_DUMMY_OUTBOUND), DEFAULT_DUMMY_OUTBOUND);
|
||||
_reorderInbound = getBoolean(props.getProperty(PROP_REORDER_INBOUND), DEFAULT_REORDER_INBOUND);
|
||||
_reorderOutbound = getBoolean(props.getProperty(PROP_REORDER_OUTBOUND), DEFAULT_REORDER_OUTBOUND);
|
||||
_inboundDuration = getLong(props.getProperty(PROP_DURATION), DEFAULT_DURATION);
|
||||
_enforceStrictMinimumLength = getBoolean(props.getProperty(PROP_STRICT_MINIMUM_LENGTH), DEFAULT_STRICT_MINIMUM_LENGTH);
|
||||
}
|
||||
_inboundSettings.readFromProperties("inbound.", props);
|
||||
_outboundSettings.readFromProperties("outbound.", props);
|
||||
}
|
||||
|
||||
public void writeToProperties(Properties props) {
|
||||
if (props == null) return;
|
||||
props.setProperty(PROP_NUM_INBOUND, ""+_numInbound);
|
||||
props.setProperty(PROP_NUM_OUTBOUND, ""+_numOutbound);
|
||||
props.setProperty(PROP_DEPTH_INBOUND, ""+_depthInbound);
|
||||
props.setProperty(PROP_DEPTH_OUTBOUND, ""+_depthOutbound);
|
||||
props.setProperty(PROP_MSGS_AVG, ""+_msgsPerMinuteAvgInbound);
|
||||
props.setProperty(PROP_MSGS_PEAK, ""+_msgsPerMinutePeakInbound);
|
||||
props.setProperty(PROP_BYTES_AVG, ""+_bytesPerMinuteAvgInbound);
|
||||
props.setProperty(PROP_BYTES_PEAK, ""+_bytesPerMinutePeakInbound);
|
||||
props.setProperty(PROP_DUMMY_INBOUND, (_includeDummyInbound ? Boolean.TRUE.toString() : Boolean.FALSE.toString()));
|
||||
props.setProperty(PROP_DUMMY_OUTBOUND, (_includeDummyOutbound ? Boolean.TRUE.toString() : Boolean.FALSE.toString()));
|
||||
props.setProperty(PROP_REORDER_INBOUND, (_reorderInbound ? Boolean.TRUE.toString() : Boolean.FALSE.toString()));
|
||||
props.setProperty(PROP_REORDER_OUTBOUND, (_reorderOutbound ? Boolean.TRUE.toString() : Boolean.FALSE.toString()));
|
||||
props.setProperty(PROP_DURATION, ""+_inboundDuration);
|
||||
props.setProperty(PROP_STRICT_MINIMUM_LENGTH, (_enforceStrictMinimumLength ? Boolean.TRUE.toString() : Boolean.FALSE.toString()));
|
||||
if (props == null) return;
|
||||
_inboundSettings.writeToProperties("inbound.", props);
|
||||
_outboundSettings.writeToProperties("outbound.", props);
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
Properties p = new Properties();
|
||||
writeToProperties(p);
|
||||
buf.append("Client tunnel settings:\n");
|
||||
buf.append("====================================\n");
|
||||
for (Iterator iter = p.keySet().iterator(); iter.hasNext(); ) {
|
||||
String name = (String)iter.next();
|
||||
String val = p.getProperty(name);
|
||||
buf.append(name).append(" = [").append(val).append("]\n");
|
||||
}
|
||||
buf.append("====================================\n");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
////
|
||||
////
|
||||
|
||||
private static final boolean getBoolean(String str, boolean defaultValue) {
|
||||
if (str == null) return defaultValue;
|
||||
String s = str.toUpperCase();
|
||||
boolean v = "TRUE".equals(s) || "YES".equals(s);
|
||||
return v;
|
||||
}
|
||||
private static final int getInt(String str, int defaultValue) { return (int)getLong(str, defaultValue); }
|
||||
private static final long getLong(String str, long defaultValue) {
|
||||
if (str == null) return defaultValue;
|
||||
try {
|
||||
long val = Long.parseLong(str);
|
||||
return val;
|
||||
} catch (NumberFormatException nfe) {
|
||||
return defaultValue;
|
||||
}
|
||||
StringBuffer buf = new StringBuffer();
|
||||
Properties p = new Properties();
|
||||
writeToProperties(p);
|
||||
buf.append("Client tunnel settings:\n");
|
||||
buf.append("====================================\n");
|
||||
for (Iterator iter = p.keySet().iterator(); iter.hasNext(); ) {
|
||||
String name = (String)iter.next();
|
||||
String val = p.getProperty(name);
|
||||
buf.append(name).append(" = [").append(val).append("]\n");
|
||||
}
|
||||
buf.append("====================================\n");
|
||||
return buf.toString();
|
||||
}
|
||||
}
|
||||
|
@ -8,14 +8,22 @@ package net.i2p.router;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.Writer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.RouterIdentity;
|
||||
import net.i2p.data.i2np.DeliveryStatusMessage;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
|
||||
import net.i2p.data.i2np.TunnelCreateStatusMessage;
|
||||
import net.i2p.data.i2np.TunnelDataMessage;
|
||||
import net.i2p.data.i2np.TunnelGatewayMessage;
|
||||
import net.i2p.util.I2PThread;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
@ -24,20 +32,46 @@ import net.i2p.util.Log;
|
||||
* periodically retrieve them for processing.
|
||||
*
|
||||
*/
|
||||
public class InNetMessagePool {
|
||||
public class InNetMessagePool implements Service {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private List _messages;
|
||||
private Map _handlerJobBuilders;
|
||||
private List _pendingDataMessages;
|
||||
private List _pendingDataMessagesFrom;
|
||||
private List _pendingGatewayMessages;
|
||||
private SharedShortCircuitDataJob _shortCircuitDataJob;
|
||||
private SharedShortCircuitGatewayJob _shortCircuitGatewayJob;
|
||||
private boolean _alive;
|
||||
private boolean _dispatchThreaded;
|
||||
|
||||
/**
|
||||
* If set to true, we will have two additional threads - one for dispatching
|
||||
* tunnel data messages, and another for dispatching tunnel gateway messages.
|
||||
* These will not use the JobQueue but will operate sequentially. Otherwise,
|
||||
* if this is set to false, the messages will be queued up in the jobQueue,
|
||||
* using the jobQueue's single thread.
|
||||
*
|
||||
*/
|
||||
public static final String PROP_DISPATCH_THREADED = "router.dispatchThreaded";
|
||||
public static final boolean DEFAULT_DISPATCH_THREADED = false;
|
||||
|
||||
public InNetMessagePool(RouterContext context) {
|
||||
_context = context;
|
||||
_messages = new ArrayList();
|
||||
_handlerJobBuilders = new HashMap();
|
||||
_pendingDataMessages = new ArrayList(16);
|
||||
_pendingDataMessagesFrom = new ArrayList(16);
|
||||
_pendingGatewayMessages = new ArrayList(16);
|
||||
_shortCircuitDataJob = new SharedShortCircuitDataJob(context);
|
||||
_shortCircuitGatewayJob = new SharedShortCircuitGatewayJob(context);
|
||||
_log = _context.logManager().getLog(InNetMessagePool.class);
|
||||
_alive = false;
|
||||
_context.statManager().createRateStat("inNetPool.dropped", "How often do we drop a message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.droppedDeliveryStatusDelay", "How long after a delivery status message is created do we receive it back again (for messages that are too slow to be handled)", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.duplicate", "How often do we receive a duplicate message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.droppedTunnelCreateStatusMessage", "How often we drop a slow-to-arrive tunnel request response", "InNetPool", new long[] { 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("inNetPool.droppedDbLookupResponseMessage", "How often we drop a slow-to-arrive db search response", "InNetPool", new long[] { 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("pool.dispatchDataTime", "How long a tunnel dispatch takes", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
_context.statManager().createRateStat("pool.dispatchGatewayTime", "How long a tunnel gateway dispatch takes", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
}
|
||||
|
||||
public HandlerJobBuilder registerHandlerJobBuilder(int i2npMessageType, HandlerJobBuilder builder) {
|
||||
@ -56,10 +90,8 @@ public class InNetMessagePool {
|
||||
* (though if the builder doesn't create a job, it is added to the pool)
|
||||
*
|
||||
*/
|
||||
public int add(InNetMessage msg) {
|
||||
I2NPMessage messageBody = msg.getMessage();
|
||||
msg.processingComplete();
|
||||
Date exp = messageBody.getMessageExpiration();
|
||||
public int add(I2NPMessage messageBody, RouterIdentity fromRouter, Hash fromRouterHash) {
|
||||
long exp = messageBody.getMessageExpiration();
|
||||
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Received inbound "
|
||||
@ -67,131 +99,298 @@ public class InNetMessagePool {
|
||||
+ " expiring on " + exp
|
||||
+ " of type " + messageBody.getClass().getName());
|
||||
|
||||
boolean valid = _context.messageValidator().validateMessage(messageBody.getUniqueId(), exp.getTime());
|
||||
if (!valid) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Duplicate message received [" + messageBody.getUniqueId()
|
||||
+ " expiring on " + exp + "]: " + messageBody.getClass().getName());
|
||||
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
|
||||
_context.statManager().addRateData("inNetPool.duplicate", 1, 0);
|
||||
_context.messageHistory().droppedOtherMessage(messageBody);
|
||||
_context.messageHistory().messageProcessingError(messageBody.getUniqueId(),
|
||||
messageBody.getClass().getName(),
|
||||
"Duplicate/expired");
|
||||
return -1;
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Message received [" + messageBody.getUniqueId()
|
||||
+ " expiring on " + exp + "] is NOT a duplicate or exipired");
|
||||
}
|
||||
|
||||
int size = -1;
|
||||
int type = messageBody.getType();
|
||||
HandlerJobBuilder builder = (HandlerJobBuilder)_handlerJobBuilders.get(new Integer(type));
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Add message to the inNetMessage pool - builder: " + builder
|
||||
+ " message class: " + messageBody.getClass().getName());
|
||||
|
||||
if (builder != null) {
|
||||
Job job = builder.createJob(messageBody, msg.getFromRouter(),
|
||||
msg.getFromRouterHash());
|
||||
if (job != null) {
|
||||
_context.jobQueue().addJob(job);
|
||||
synchronized (_messages) {
|
||||
size = _messages.size();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
List origMessages = _context.messageRegistry().getOriginalMessages(messageBody);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Original messages for inbound message: " + origMessages.size());
|
||||
if (origMessages.size() > 1) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Orig: " + origMessages + " \nthe above are replies for: " + msg,
|
||||
new Exception("Multiple matches"));
|
||||
}
|
||||
|
||||
for (int i = 0; i < origMessages.size(); i++) {
|
||||
OutNetMessage omsg = (OutNetMessage)origMessages.get(i);
|
||||
ReplyJob job = omsg.getOnReplyJob();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Original message [" + i + "] " + omsg.getReplySelector()
|
||||
+ " : " + omsg + ": reply job: " + job);
|
||||
|
||||
if (job != null) {
|
||||
job.setMessage(messageBody);
|
||||
_context.jobQueue().addJob(job);
|
||||
}
|
||||
}
|
||||
|
||||
if (origMessages.size() <= 0) {
|
||||
// not handled as a reply
|
||||
if (size == -1) {
|
||||
// was not handled via HandlerJobBuilder
|
||||
if (messageBody instanceof TunnelDataMessage) {
|
||||
// do not validate the message with the validator - the IV validator is sufficient
|
||||
} else {
|
||||
boolean valid = _context.messageValidator().validateMessage(messageBody.getUniqueId(), exp);
|
||||
if (!valid) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Duplicate message received [" + messageBody.getUniqueId()
|
||||
+ " expiring on " + exp + "]: " + messageBody.getClass().getName());
|
||||
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
|
||||
_context.statManager().addRateData("inNetPool.duplicate", 1, 0);
|
||||
_context.messageHistory().droppedOtherMessage(messageBody);
|
||||
if (type == DeliveryStatusMessage.MESSAGE_TYPE) {
|
||||
long timeSinceSent = _context.clock().now() -
|
||||
((DeliveryStatusMessage)messageBody).getArrival().getTime();
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Dropping unhandled delivery status message created " + timeSinceSent + "ms ago: " + msg);
|
||||
_context.statManager().addRateData("inNetPool.droppedDeliveryStatusDelay", timeSinceSent, timeSinceSent);
|
||||
} else {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Message " + messageBody + " expiring on "
|
||||
+ (messageBody != null ? (messageBody.getMessageExpiration()+"") : " [unknown]")
|
||||
+ " was not handled by a HandlerJobBuilder - DROPPING: "
|
||||
+ msg, new Exception("DROPPED MESSAGE"));
|
||||
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
|
||||
}
|
||||
_context.messageHistory().messageProcessingError(messageBody.getUniqueId(),
|
||||
messageBody.getClass().getName(),
|
||||
"Duplicate/expired");
|
||||
return -1;
|
||||
} else {
|
||||
String mtype = messageBody.getClass().getName();
|
||||
_context.messageHistory().receiveMessage(mtype, messageBody.getUniqueId(),
|
||||
messageBody.getMessageExpiration(),
|
||||
msg.getFromRouterHash(), true);
|
||||
return size;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Message received [" + messageBody.getUniqueId()
|
||||
+ " expiring on " + exp + "] is NOT a duplicate or exipired");
|
||||
}
|
||||
}
|
||||
|
||||
boolean jobFound = false;
|
||||
int type = messageBody.getType();
|
||||
boolean allowMatches = true;
|
||||
|
||||
if (messageBody instanceof TunnelGatewayMessage) {
|
||||
shortCircuitTunnelGateway(messageBody);
|
||||
allowMatches = false;
|
||||
} else if (messageBody instanceof TunnelDataMessage) {
|
||||
shortCircuitTunnelData(messageBody, fromRouterHash);
|
||||
allowMatches = false;
|
||||
} else {
|
||||
HandlerJobBuilder builder = (HandlerJobBuilder)_handlerJobBuilders.get(new Integer(type));
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Add message to the inNetMessage pool - builder: " + builder
|
||||
+ " message class: " + messageBody.getClass().getName());
|
||||
|
||||
if (builder != null) {
|
||||
Job job = builder.createJob(messageBody, fromRouter,
|
||||
fromRouterHash);
|
||||
if (job != null) {
|
||||
_context.jobQueue().addJob(job);
|
||||
jobFound = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (allowMatches) {
|
||||
List origMessages = _context.messageRegistry().getOriginalMessages(messageBody);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Original messages for inbound message: " + origMessages.size());
|
||||
if (origMessages.size() > 1) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Orig: " + origMessages + " \nthe above are replies for: " + messageBody,
|
||||
new Exception("Multiple matches"));
|
||||
}
|
||||
|
||||
for (int i = 0; i < origMessages.size(); i++) {
|
||||
OutNetMessage omsg = (OutNetMessage)origMessages.get(i);
|
||||
ReplyJob job = omsg.getOnReplyJob();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Original message [" + i + "] " + omsg.getReplySelector()
|
||||
+ " : " + omsg + ": reply job: " + job);
|
||||
|
||||
if (job != null) {
|
||||
job.setMessage(messageBody);
|
||||
_context.jobQueue().addJob(job);
|
||||
}
|
||||
}
|
||||
|
||||
if (origMessages.size() <= 0) {
|
||||
// not handled as a reply
|
||||
if (!jobFound) {
|
||||
// was not handled via HandlerJobBuilder
|
||||
_context.messageHistory().droppedOtherMessage(messageBody);
|
||||
if (type == DeliveryStatusMessage.MESSAGE_TYPE) {
|
||||
long timeSinceSent = _context.clock().now() -
|
||||
((DeliveryStatusMessage)messageBody).getArrival();
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Dropping unhandled delivery status message created " + timeSinceSent + "ms ago: " + messageBody);
|
||||
_context.statManager().addRateData("inNetPool.droppedDeliveryStatusDelay", timeSinceSent, timeSinceSent);
|
||||
} else if (type == TunnelCreateStatusMessage.MESSAGE_TYPE) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Dropping slow tunnel create request response: " + messageBody);
|
||||
_context.statManager().addRateData("inNetPool.droppedTunnelCreateStatusMessage", 1, 0);
|
||||
} else if (type == DatabaseSearchReplyMessage.MESSAGE_TYPE) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Dropping slow db lookup response: " + messageBody);
|
||||
_context.statManager().addRateData("inNetPool.droppedDbLookupResponseMessage", 1, 0);
|
||||
} else {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Message " + messageBody + " expiring on "
|
||||
+ (messageBody != null ? (messageBody.getMessageExpiration()+"") : " [unknown]")
|
||||
+ " was not handled by a HandlerJobBuilder - DROPPING: "
|
||||
+ messageBody, new Exception("DROPPED MESSAGE"));
|
||||
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
|
||||
}
|
||||
} else {
|
||||
String mtype = messageBody.getClass().getName();
|
||||
_context.messageHistory().receiveMessage(mtype, messageBody.getUniqueId(),
|
||||
messageBody.getMessageExpiration(),
|
||||
fromRouterHash, true);
|
||||
return 0; // no queue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
String mtype = messageBody.getClass().getName();
|
||||
_context.messageHistory().receiveMessage(mtype, messageBody.getUniqueId(),
|
||||
messageBody.getMessageExpiration(),
|
||||
msg.getFromRouterHash(), true);
|
||||
return size;
|
||||
fromRouterHash, true);
|
||||
return 0; // no queue
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove up to maxNumMessages InNetMessages from the pool and return them.
|
||||
*
|
||||
*/
|
||||
public List getNext(int maxNumMessages) {
|
||||
ArrayList msgs = new ArrayList(maxNumMessages);
|
||||
synchronized (_messages) {
|
||||
for (int i = 0; (i < maxNumMessages) && (_messages.size() > 0); i++)
|
||||
msgs.add(_messages.remove(0));
|
||||
// the following short circuits the tunnel dispatching - i'm not sure whether
|
||||
// we'll want to run the dispatching in jobs or whether it shuold go inline with
|
||||
// others and/or on other threads (e.g. transport threads). lets try 'em both.
|
||||
|
||||
private void shortCircuitTunnelGateway(I2NPMessage messageBody) {
|
||||
if (false) {
|
||||
doShortCircuitTunnelGateway(messageBody);
|
||||
} else {
|
||||
synchronized (_pendingGatewayMessages) {
|
||||
_pendingGatewayMessages.add(messageBody);
|
||||
_pendingGatewayMessages.notifyAll();
|
||||
}
|
||||
if (!_dispatchThreaded)
|
||||
_context.jobQueue().addJob(_shortCircuitGatewayJob);
|
||||
}
|
||||
return msgs;
|
||||
}
|
||||
private void doShortCircuitTunnelGateway(I2NPMessage messageBody) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Shortcut dispatch tunnelGateway message " + messageBody);
|
||||
long before = _context.clock().now();
|
||||
_context.tunnelDispatcher().dispatch((TunnelGatewayMessage)messageBody);
|
||||
long dispatchTime = _context.clock().now() - before;
|
||||
_context.statManager().addRateData("tunnel.dispatchGatewayTime", dispatchTime, dispatchTime);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the next message
|
||||
*
|
||||
*/
|
||||
public InNetMessage getNext() {
|
||||
synchronized (_messages) {
|
||||
if (_messages.size() <= 0) return null;
|
||||
return (InNetMessage)_messages.remove(0);
|
||||
private void shortCircuitTunnelData(I2NPMessage messageBody, Hash from) {
|
||||
if (false) {
|
||||
doShortCircuitTunnelData(messageBody, from);
|
||||
} else {
|
||||
synchronized (_pendingDataMessages) {
|
||||
_pendingDataMessages.add(messageBody);
|
||||
_pendingDataMessagesFrom.add(from);
|
||||
_pendingDataMessages.notifyAll();
|
||||
//_context.jobQueue().addJob(new ShortCircuitDataJob(_context, messageBody, from));
|
||||
}
|
||||
if (!_dispatchThreaded)
|
||||
_context.jobQueue().addJob(_shortCircuitDataJob);
|
||||
}
|
||||
}
|
||||
private void doShortCircuitTunnelData(I2NPMessage messageBody, Hash from) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Shortcut dispatch tunnelData message " + messageBody);
|
||||
_context.tunnelDispatcher().dispatch((TunnelDataMessage)messageBody, from);
|
||||
}
|
||||
|
||||
public void renderStatusHTML(Writer out) {}
|
||||
public void restart() {
|
||||
shutdown();
|
||||
try { Thread.sleep(100); } catch (InterruptedException ie) {}
|
||||
startup();
|
||||
}
|
||||
public void shutdown() {
|
||||
_alive = false;
|
||||
synchronized (_pendingDataMessages) {
|
||||
_pendingDataMessages.clear();
|
||||
_pendingDataMessagesFrom.clear();
|
||||
_pendingDataMessages.notifyAll();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the size of the pool
|
||||
*
|
||||
*/
|
||||
public int getCount() {
|
||||
synchronized (_messages) {
|
||||
return _messages.size();
|
||||
public void startup() {
|
||||
_alive = true;
|
||||
_dispatchThreaded = DEFAULT_DISPATCH_THREADED;
|
||||
String threadedStr = _context.getProperty(PROP_DISPATCH_THREADED);
|
||||
if (threadedStr != null) {
|
||||
_dispatchThreaded = Boolean.valueOf(threadedStr).booleanValue();
|
||||
}
|
||||
if (_dispatchThreaded) {
|
||||
I2PThread data = new I2PThread(new TunnelDataDispatcher(), "Tunnel data dispatcher");
|
||||
data.setDaemon(true);
|
||||
data.start();
|
||||
I2PThread gw = new I2PThread(new TunnelGatewayDispatcher(), "Tunnel gateway dispatcher");
|
||||
gw.setDaemon(true);
|
||||
gw.start();
|
||||
}
|
||||
}
|
||||
|
||||
private class SharedShortCircuitDataJob extends JobImpl {
|
||||
public SharedShortCircuitDataJob(RouterContext ctx) {
|
||||
super(ctx);
|
||||
}
|
||||
public String getName() { return "Dispatch tunnel participant message"; }
|
||||
public void runJob() {
|
||||
int remaining = 0;
|
||||
I2NPMessage msg = null;
|
||||
Hash from = null;
|
||||
synchronized (_pendingDataMessages) {
|
||||
if (_pendingDataMessages.size() > 0) {
|
||||
msg = (I2NPMessage)_pendingDataMessages.remove(0);
|
||||
from = (Hash)_pendingDataMessagesFrom.remove(0);
|
||||
}
|
||||
remaining = _pendingDataMessages.size();
|
||||
}
|
||||
if (msg != null)
|
||||
doShortCircuitTunnelData(msg, from);
|
||||
if (remaining > 0)
|
||||
getContext().jobQueue().addJob(SharedShortCircuitDataJob.this);
|
||||
}
|
||||
}
|
||||
private class SharedShortCircuitGatewayJob extends JobImpl {
|
||||
public SharedShortCircuitGatewayJob(RouterContext ctx) {
|
||||
super(ctx);
|
||||
}
|
||||
public String getName() { return "Dispatch tunnel gateway message"; }
|
||||
public void runJob() {
|
||||
I2NPMessage msg = null;
|
||||
int remaining = 0;
|
||||
synchronized (_pendingGatewayMessages) {
|
||||
if (_pendingGatewayMessages.size() > 0)
|
||||
msg = (I2NPMessage)_pendingGatewayMessages.remove(0);
|
||||
remaining = _pendingGatewayMessages.size();
|
||||
}
|
||||
if (msg != null)
|
||||
doShortCircuitTunnelGateway(msg);
|
||||
if (remaining > 0)
|
||||
getContext().jobQueue().addJob(SharedShortCircuitGatewayJob.this);
|
||||
}
|
||||
}
|
||||
|
||||
private class TunnelGatewayDispatcher implements Runnable {
|
||||
public void run() {
|
||||
while (_alive) {
|
||||
I2NPMessage msg = null;
|
||||
try {
|
||||
synchronized (_pendingGatewayMessages) {
|
||||
if (_pendingGatewayMessages.size() <= 0)
|
||||
_pendingGatewayMessages.wait();
|
||||
else
|
||||
msg = (I2NPMessage)_pendingGatewayMessages.remove(0);
|
||||
}
|
||||
if (msg != null) {
|
||||
long before = _context.clock().now();
|
||||
doShortCircuitTunnelGateway(msg);
|
||||
long elapsed = _context.clock().now() - before;
|
||||
_context.statManager().addRateData("pool.dispatchGatewayTime", elapsed, elapsed);
|
||||
}
|
||||
} catch (InterruptedException ie) {
|
||||
|
||||
} catch (OutOfMemoryError oome) {
|
||||
throw oome;
|
||||
} catch (Exception e) {
|
||||
if (_log.shouldLog(Log.CRIT))
|
||||
_log.log(Log.CRIT, "Error in the tunnel gateway dispatcher", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
private class TunnelDataDispatcher implements Runnable {
|
||||
public void run() {
|
||||
while (_alive) {
|
||||
I2NPMessage msg = null;
|
||||
Hash from = null;
|
||||
try {
|
||||
synchronized (_pendingDataMessages) {
|
||||
if (_pendingDataMessages.size() <= 0) {
|
||||
_pendingDataMessages.wait();
|
||||
} else {
|
||||
msg = (I2NPMessage)_pendingDataMessages.remove(0);
|
||||
from = (Hash)_pendingDataMessagesFrom.remove(0);
|
||||
}
|
||||
}
|
||||
if (msg != null) {
|
||||
long before = _context.clock().now();
|
||||
doShortCircuitTunnelData(msg, from);
|
||||
long elapsed = _context.clock().now() - before;
|
||||
_context.statManager().addRateData("pool.dispatchDataTime", elapsed, elapsed);
|
||||
}
|
||||
} catch (InterruptedException ie) {
|
||||
|
||||
} catch (OutOfMemoryError oome) {
|
||||
throw oome;
|
||||
} catch (Exception e) {
|
||||
if (_log.shouldLog(Log.CRIT))
|
||||
_log.log(Log.CRIT, "Error in the tunnel data dispatcher", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -12,15 +12,13 @@ import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.SortedMap;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.router.networkdb.HandleDatabaseLookupMessageJob;
|
||||
import net.i2p.router.tunnelmanager.HandleTunnelCreateMessageJob;
|
||||
import net.i2p.router.tunnelmanager.RequestTunnelJob;
|
||||
import net.i2p.util.Clock;
|
||||
import net.i2p.util.I2PThread;
|
||||
import net.i2p.util.Log;
|
||||
@ -177,6 +175,13 @@ public class JobQueue {
|
||||
return;
|
||||
}
|
||||
|
||||
public void removeJob(Job job) {
|
||||
synchronized (_jobLock) {
|
||||
_readyJobs.remove(job);
|
||||
_timedJobs.remove(job);
|
||||
}
|
||||
}
|
||||
|
||||
public void timingUpdated() {
|
||||
synchronized (_jobLock) {
|
||||
_jobLock.notifyAll();
|
||||
@ -217,13 +222,6 @@ public class JobQueue {
|
||||
if (cls == HandleDatabaseLookupMessageJob.class)
|
||||
return true;
|
||||
|
||||
// tunnels are a bitch, but its dropped() builds a pair of fake ones just in case
|
||||
if (cls == RequestTunnelJob.class)
|
||||
return true;
|
||||
|
||||
// if we're already this loaded, dont take more tunnels
|
||||
if (cls == HandleTunnelCreateMessageJob.class)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@ -624,7 +622,9 @@ public class JobQueue {
|
||||
buf.append("# ready/waiting jobs: ").append(readyJobs.size()).append(" <i>(lots of these mean there's likely a big problem)</i><ol>\n");
|
||||
for (int i = 0; i < readyJobs.size(); i++) {
|
||||
Job j = (Job)readyJobs.get(i);
|
||||
buf.append("<li> [waiting ").append(now-j.getTiming().getStartAfter()).append("ms]: ");
|
||||
buf.append("<li> [waiting ");
|
||||
buf.append(DataHelper.formatDuration(now-j.getTiming().getStartAfter()));
|
||||
buf.append("]: ");
|
||||
buf.append(j.toString()).append("</li>\n");
|
||||
}
|
||||
buf.append("</ol>\n");
|
||||
@ -638,8 +638,9 @@ public class JobQueue {
|
||||
}
|
||||
for (Iterator iter = ordered.values().iterator(); iter.hasNext(); ) {
|
||||
Job j = (Job)iter.next();
|
||||
buf.append("<li>").append(j.getName()).append(" @ ");
|
||||
buf.append(new Date(j.getTiming().getStartAfter())).append("</li>\n");
|
||||
long time = j.getTiming().getStartAfter() - now;
|
||||
buf.append("<li>").append(j.getName()).append(" in ");
|
||||
buf.append(DataHelper.formatDuration(time)).append("</li>\n");
|
||||
}
|
||||
buf.append("</ol>\n");
|
||||
|
||||
|
@ -20,6 +20,7 @@ import java.util.Set;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataStructure;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.PrivateKey;
|
||||
import net.i2p.data.PublicKey;
|
||||
import net.i2p.data.SigningPrivateKey;
|
||||
@ -97,7 +98,7 @@ public class KeyManager {
|
||||
_log.info("Registering keys for destination " + dest.calculateHash().toBase64());
|
||||
LeaseSetKeys keys = new LeaseSetKeys(dest, leaseRevocationPrivateKey, endpointDecryptionKey);
|
||||
synchronized (_leaseSetKeys) {
|
||||
_leaseSetKeys.put(dest, keys);
|
||||
_leaseSetKeys.put(dest.calculateHash(), keys);
|
||||
}
|
||||
if (dest != null)
|
||||
queueWrite();
|
||||
@ -116,7 +117,7 @@ public class KeyManager {
|
||||
_log.info("Unregistering keys for destination " + dest.calculateHash().toBase64());
|
||||
LeaseSetKeys rv = null;
|
||||
synchronized (_leaseSetKeys) {
|
||||
rv = (LeaseSetKeys)_leaseSetKeys.remove(dest);
|
||||
rv = (LeaseSetKeys)_leaseSetKeys.remove(dest.calculateHash());
|
||||
}
|
||||
if (dest != null)
|
||||
queueWrite();
|
||||
@ -124,6 +125,9 @@ public class KeyManager {
|
||||
}
|
||||
|
||||
public LeaseSetKeys getKeys(Destination dest) {
|
||||
return getKeys(dest.calculateHash());
|
||||
}
|
||||
public LeaseSetKeys getKeys(Hash dest) {
|
||||
synchronized (_leaseSetKeys) {
|
||||
return (LeaseSetKeys)_leaseSetKeys.get(dest);
|
||||
}
|
||||
|
@ -51,7 +51,7 @@ public class MessageHistory {
|
||||
_fmt.setTimeZone(TimeZone.getTimeZone("GMT"));
|
||||
_reinitializeJob = new ReinitializeJob();
|
||||
_writeJob = new WriteJob();
|
||||
_submitMessageHistoryJob = new SubmitMessageHistoryJob(_context);
|
||||
//_submitMessageHistoryJob = new SubmitMessageHistoryJob(_context);
|
||||
initialize(true);
|
||||
}
|
||||
|
||||
@ -103,8 +103,8 @@ public class MessageHistory {
|
||||
updateSettings();
|
||||
addEntry(getPrefix() + "** Router initialized (started up or changed identities)");
|
||||
_context.jobQueue().addJob(_writeJob);
|
||||
_submitMessageHistoryJob.getTiming().setStartAfter(_context.clock().now() + 2*60*1000);
|
||||
_context.jobQueue().addJob(_submitMessageHistoryJob);
|
||||
//_submitMessageHistoryJob.getTiming().setStartAfter(_context.clock().now() + 2*60*1000);
|
||||
//_context.jobQueue().addJob(_submitMessageHistoryJob);
|
||||
}
|
||||
}
|
||||
|
||||
@ -163,7 +163,7 @@ public class MessageHistory {
|
||||
buf.append("receive tunnel create [").append(createTunnel.getTunnelId()).append("] ");
|
||||
if (nextPeer != null)
|
||||
buf.append("(next [").append(getName(nextPeer)).append("]) ");
|
||||
buf.append("ok? ").append(ok).append(" expiring on [").append(getTime(expire)).append("]");
|
||||
buf.append("ok? ").append(ok).append(" expiring on [").append(getTime(expire.getTime())).append("]");
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
|
||||
@ -178,17 +178,7 @@ public class MessageHistory {
|
||||
if (tunnel == null) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("joining tunnel [").append(tunnel.getTunnelId().getTunnelId()).append("] as [").append(state).append("] ");
|
||||
buf.append(" (next: ");
|
||||
TunnelInfo cur = tunnel;
|
||||
while (cur.getNextHopInfo() != null) {
|
||||
buf.append('[').append(getName(cur.getNextHopInfo().getThisHop()));
|
||||
buf.append("], ");
|
||||
cur = cur.getNextHopInfo();
|
||||
}
|
||||
if (cur.getNextHop() != null)
|
||||
buf.append('[').append(getName(cur.getNextHop())).append(']');
|
||||
buf.append(") expiring on [").append(getTime(new Date(tunnel.getSettings().getExpiration()))).append("]");
|
||||
buf.append("joining tunnel [").append(tunnel.getReceiveTunnelId(0).getTunnelId()).append("] as [").append(state).append("] ");
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
|
||||
@ -218,19 +208,7 @@ public class MessageHistory {
|
||||
if (tunnel == null) return;
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append(getPrefix());
|
||||
buf.append("tunnel ").append(tunnel.getTunnelId().getTunnelId()).append(" tested ok after ").append(timeToTest).append("ms (containing ");
|
||||
TunnelInfo cur = tunnel;
|
||||
while (cur != null) {
|
||||
buf.append('[').append(getName(cur.getThisHop())).append("], ");
|
||||
if (cur.getNextHopInfo() != null) {
|
||||
cur = cur.getNextHopInfo();
|
||||
} else {
|
||||
if (cur.getNextHop() != null)
|
||||
buf.append('[').append(getName(cur.getNextHop())).append(']');
|
||||
cur = null;
|
||||
}
|
||||
}
|
||||
buf.append(')');
|
||||
buf.append("tunnel ").append(tunnel).append(" tested ok after ").append(timeToTest).append("ms");
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
|
||||
@ -278,7 +256,7 @@ public class MessageHistory {
|
||||
buf.append(getPrefix());
|
||||
buf.append("dropped message ").append(msgId).append(" for unknown tunnel [").append(id.getTunnelId());
|
||||
buf.append("] from [").append(getName(from)).append("]").append(" expiring on ");
|
||||
buf.append(getTime(expiration));
|
||||
buf.append(getTime(expiration.getTime()));
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
|
||||
@ -308,7 +286,7 @@ public class MessageHistory {
|
||||
buf.append("timed out waiting for a reply to [").append(sentMessage.getMessageType());
|
||||
buf.append("] [").append(sentMessage.getMessageId()).append("] expiring on [");
|
||||
if (sentMessage != null)
|
||||
buf.append(getTime(new Date(sentMessage.getReplySelector().getExpiration())));
|
||||
buf.append(getTime(sentMessage.getReplySelector().getExpiration()));
|
||||
buf.append("] ").append(sentMessage.getReplySelector().toString());
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
@ -338,8 +316,9 @@ public class MessageHistory {
|
||||
* @param peer router that the message was sent to
|
||||
* @param sentOk whether the message was sent successfully
|
||||
*/
|
||||
public void sendMessage(String messageType, long messageId, Date expiration, Hash peer, boolean sentOk) {
|
||||
public void sendMessage(String messageType, long messageId, long expiration, Hash peer, boolean sentOk) {
|
||||
if (!_doLog) return;
|
||||
if (true) return;
|
||||
StringBuffer buf = new StringBuffer(256);
|
||||
buf.append(getPrefix());
|
||||
buf.append("send [").append(messageType).append("] message [").append(messageId).append("] ");
|
||||
@ -363,8 +342,9 @@ public class MessageHistory {
|
||||
* @param isValid whether the message is valid (non duplicates, etc)
|
||||
*
|
||||
*/
|
||||
public void receiveMessage(String messageType, long messageId, Date expiration, Hash from, boolean isValid) {
|
||||
public void receiveMessage(String messageType, long messageId, long expiration, Hash from, boolean isValid) {
|
||||
if (!_doLog) return;
|
||||
if (true) return;
|
||||
StringBuffer buf = new StringBuffer(256);
|
||||
buf.append(getPrefix());
|
||||
buf.append("receive [").append(messageType).append("] with id [").append(messageId).append("] ");
|
||||
@ -376,7 +356,7 @@ public class MessageHistory {
|
||||
//_log.warn("ReceiveMessage tunnel message ["+messageId+"]", new Exception("Receive tunnel"));
|
||||
}
|
||||
}
|
||||
public void receiveMessage(String messageType, long messageId, Date expiration, boolean isValid) {
|
||||
public void receiveMessage(String messageType, long messageId, long expiration, boolean isValid) {
|
||||
receiveMessage(messageType, messageId, expiration, null, isValid);
|
||||
}
|
||||
|
||||
@ -424,6 +404,55 @@ public class MessageHistory {
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
|
||||
public void receiveTunnelFragment(long messageId, int fragmentId) {
|
||||
if (!_doLog) return;
|
||||
if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Receive fragment ").append(fragmentId).append(" in ").append(messageId);
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
public void receiveTunnelFragmentComplete(long messageId) {
|
||||
if (!_doLog) return;
|
||||
if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Receive fragmented message completely: ").append(messageId);
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
public void droppedFragmentedMessage(long messageId) {
|
||||
if (!_doLog) return;
|
||||
if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Fragmented message dropped: ").append(messageId);
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
public void fragmentMessage(long messageId, int numFragments) {
|
||||
if (!_doLog) return;
|
||||
if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Break message ").append(messageId).append(" into fragments: ").append(numFragments);
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
public void droppedTunnelDataMessageUnknown(long msgId, long tunnelId) {
|
||||
if (!_doLog) return;
|
||||
if (msgId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Dropped data message ").append(msgId).append(" for unknown tunnel ").append(tunnelId);
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
public void droppedTunnelGatewayMessageUnknown(long msgId, long tunnelId) {
|
||||
if (!_doLog) return;
|
||||
if (msgId == -1) throw new IllegalArgumentException("why are you -1?");
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
buf.append(getPrefix());
|
||||
buf.append("Dropped gateway message ").append(msgId).append(" for unknown tunnel ").append(tunnelId);
|
||||
addEntry(buf.toString());
|
||||
}
|
||||
|
||||
/**
|
||||
* Prettify the hash by doing a base64 and returning the first 6 characters
|
||||
*
|
||||
@ -437,14 +466,14 @@ public class MessageHistory {
|
||||
|
||||
private final String getPrefix() {
|
||||
StringBuffer buf = new StringBuffer(48);
|
||||
buf.append(getTime(new Date(_context.clock().now())));
|
||||
buf.append(getTime(_context.clock().now()));
|
||||
buf.append(' ').append(_localIdent).append(": ");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
private final String getTime(Date when) {
|
||||
private final String getTime(long when) {
|
||||
synchronized (_fmt) {
|
||||
return _fmt.format(when);
|
||||
return _fmt.format(new Date(when));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -5,6 +5,7 @@ import java.util.Iterator;
|
||||
import java.util.Set;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import net.i2p.util.DecayingBloomFilter;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
@ -18,25 +19,15 @@ import net.i2p.util.Log;
|
||||
public class MessageValidator {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
/**
|
||||
* Expiration date (as a Long) to message id (as a Long).
|
||||
* The expiration date (key) must be unique, so on collision, increment the value.
|
||||
* This keeps messageIds around longer than they need to be, but hopefully not by much ;)
|
||||
*
|
||||
*/
|
||||
private TreeMap _receivedIdExpirations;
|
||||
/** Message id (as a Long) */
|
||||
private Set _receivedIds;
|
||||
/** synchronize on this before adjusting the received id data */
|
||||
private Object _receivedIdLock;
|
||||
private DecayingBloomFilter _filter;
|
||||
|
||||
|
||||
public MessageValidator(RouterContext context) {
|
||||
_log = context.logManager().getLog(MessageValidator.class);
|
||||
_receivedIdExpirations = new TreeMap();
|
||||
_receivedIds = new HashSet(256);
|
||||
_receivedIdLock = new Object();
|
||||
_filter = null;
|
||||
_context = context;
|
||||
context.statManager().createRateStat("router.duplicateMessageId", "Note that a duplicate messageId was received", "Router",
|
||||
new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
|
||||
}
|
||||
|
||||
|
||||
@ -51,12 +42,17 @@ public class MessageValidator {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Rejecting message " + messageId + " because it expired " + (now-expiration) + "ms ago");
|
||||
return false;
|
||||
} else if (now + 4*Router.CLOCK_FUDGE_FACTOR < expiration) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Rejecting message " + messageId + " because it will expire too far in the future (" + (expiration-now) + "ms)");
|
||||
return false;
|
||||
}
|
||||
|
||||
boolean isDuplicate = noteReception(messageId, expiration);
|
||||
if (isDuplicate) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Rejecting message " + messageId + " because it is a duplicate", new Exception("Duplicate origin"));
|
||||
_context.statManager().addRateData("router.duplicateMessageId", 1, 0);
|
||||
return false;
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
@ -74,75 +70,15 @@ public class MessageValidator {
|
||||
* @return true if we HAVE already seen this message, false if not
|
||||
*/
|
||||
private boolean noteReception(long messageId, long messageExpiration) {
|
||||
Long id = new Long(messageId);
|
||||
synchronized (_receivedIdLock) {
|
||||
locked_cleanReceivedIds(_context.clock().now() - Router.CLOCK_FUDGE_FACTOR);
|
||||
if (_receivedIds.contains(id)) {
|
||||
return true;
|
||||
} else {
|
||||
long date = messageExpiration;
|
||||
while (_receivedIdExpirations.containsKey(new Long(date)))
|
||||
date++;
|
||||
_receivedIdExpirations.put(new Long(date), id);
|
||||
_receivedIds.add(id);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
boolean dup = _filter.add(messageId);
|
||||
return dup;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean the ids that we no longer need to keep track of to prevent replay
|
||||
* attacks.
|
||||
*
|
||||
*/
|
||||
private void cleanReceivedIds() {
|
||||
long now = _context.clock().now() - Router.CLOCK_FUDGE_FACTOR ;
|
||||
synchronized (_receivedIdLock) {
|
||||
locked_cleanReceivedIds(now);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean the ids that we no longer need to keep track of to prevent replay
|
||||
* attacks - only call this from within a block synchronized on the received ID lock.
|
||||
*
|
||||
*/
|
||||
private void locked_cleanReceivedIds(long now) {
|
||||
Set toRemoveIds = null;
|
||||
Set toRemoveDates = null;
|
||||
for (Iterator iter = _receivedIdExpirations.keySet().iterator(); iter.hasNext(); ) {
|
||||
Long date = (Long)iter.next();
|
||||
if (date.longValue() <= now) {
|
||||
// no need to keep track of things in the past
|
||||
if (toRemoveIds == null) {
|
||||
toRemoveIds = new HashSet(2);
|
||||
toRemoveDates = new HashSet(2);
|
||||
}
|
||||
toRemoveDates.add(date);
|
||||
toRemoveIds.add(_receivedIdExpirations.get(date));
|
||||
} else {
|
||||
// the expiration is in the future, we still need to keep track of
|
||||
// it to prevent replays
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (toRemoveIds != null) {
|
||||
for (Iterator iter = toRemoveDates.iterator(); iter.hasNext(); )
|
||||
_receivedIdExpirations.remove(iter.next());
|
||||
for (Iterator iter = toRemoveIds.iterator(); iter.hasNext(); )
|
||||
_receivedIds.remove(iter.next());
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Cleaned out " + toRemoveDates.size()
|
||||
+ " expired messageIds, leaving "
|
||||
+ _receivedIds.size() + " remaining");
|
||||
}
|
||||
public void startup() {
|
||||
_filter = new DecayingBloomFilter(_context, (int)Router.CLOCK_FUDGE_FACTOR * 2, 8);
|
||||
}
|
||||
|
||||
void shutdown() {
|
||||
if (_log.shouldLog(Log.WARN)) {
|
||||
StringBuffer buf = new StringBuffer(1024);
|
||||
buf.append("Validated messages: ").append(_receivedIds.size());
|
||||
_log.log(Log.WARN, buf.toString());
|
||||
}
|
||||
_filter.stopDecaying();
|
||||
}
|
||||
}
|
||||
|
@ -11,10 +11,11 @@ package net.i2p.router;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
@ -72,17 +73,15 @@ public class OutNetMessage {
|
||||
setOnReplyJob(null);
|
||||
setOnFailedReplyJob(null);
|
||||
setReplySelector(null);
|
||||
_timestamps = new HashMap(8);
|
||||
_timestampOrder = new LinkedList();
|
||||
_failedTransports = new HashSet();
|
||||
_failedTransports = null;
|
||||
_sendBegin = 0;
|
||||
_createdBy = new Exception("Created by");
|
||||
//_createdBy = new Exception("Created by");
|
||||
_created = context.clock().now();
|
||||
timestamp("Created");
|
||||
_context.messageStateMonitor().outboundMessageAdded();
|
||||
_context.statManager().createRateStat("outNetMessage.timeToDiscard",
|
||||
"How long until we discard an outbound msg?",
|
||||
"OutNetMessage", new long[] { 5*60*1000, 30*60*1000, 60*60*1000 });
|
||||
//_context.messageStateMonitor().outboundMessageAdded();
|
||||
//_context.statManager().createRateStat("outNetMessage.timeToDiscard",
|
||||
// "How long until we discard an outbound msg?",
|
||||
// "OutNetMessage", new long[] { 5*60*1000, 30*60*1000, 60*60*1000 });
|
||||
}
|
||||
|
||||
/**
|
||||
@ -92,24 +91,43 @@ public class OutNetMessage {
|
||||
* @return how long this message has been 'in flight'
|
||||
*/
|
||||
public long timestamp(String eventName) {
|
||||
synchronized (_timestamps) {
|
||||
long now = _context.clock().now();
|
||||
while (_timestamps.containsKey(eventName)) {
|
||||
eventName = eventName + '.';
|
||||
long now = _context.clock().now();
|
||||
if (_log.shouldLog(Log.DEBUG)) {
|
||||
// only timestamp if we are debugging
|
||||
synchronized (this) {
|
||||
locked_initTimestamps();
|
||||
while (_timestamps.containsKey(eventName)) {
|
||||
eventName = eventName + '.';
|
||||
}
|
||||
_timestamps.put(eventName, new Long(now));
|
||||
_timestampOrder.add(eventName);
|
||||
}
|
||||
_timestamps.put(eventName, new Long(now));
|
||||
_timestampOrder.add(eventName);
|
||||
return now - _created;
|
||||
}
|
||||
return now - _created;
|
||||
}
|
||||
public Map getTimestamps() {
|
||||
synchronized (_timestamps) {
|
||||
return (Map)_timestamps.clone();
|
||||
if (_log.shouldLog(Log.DEBUG)) {
|
||||
synchronized (this) {
|
||||
locked_initTimestamps();
|
||||
return (Map)_timestamps.clone();
|
||||
}
|
||||
}
|
||||
return Collections.EMPTY_MAP;
|
||||
}
|
||||
public Long getTimestamp(String eventName) {
|
||||
synchronized (_timestamps) {
|
||||
return (Long)_timestamps.get(eventName);
|
||||
if (_log.shouldLog(Log.DEBUG)) {
|
||||
synchronized (this) {
|
||||
locked_initTimestamps();
|
||||
return (Long)_timestamps.get(eventName);
|
||||
}
|
||||
}
|
||||
return ZERO;
|
||||
}
|
||||
private static final Long ZERO = new Long(0);
|
||||
private void locked_initTimestamps() {
|
||||
if (_timestamps == null) {
|
||||
_timestamps = new HashMap(8);
|
||||
_timestampOrder = new ArrayList(8);
|
||||
}
|
||||
}
|
||||
|
||||
@ -204,8 +222,15 @@ public class OutNetMessage {
|
||||
public MessageSelector getReplySelector() { return _replySelector; }
|
||||
public void setReplySelector(MessageSelector selector) { _replySelector = selector; }
|
||||
|
||||
public void transportFailed(String transportStyle) { _failedTransports.add(transportStyle); }
|
||||
public Set getFailedTransports() { return new HashSet(_failedTransports); }
|
||||
public void transportFailed(String transportStyle) {
|
||||
if (_failedTransports == null)
|
||||
_failedTransports = new HashSet(1);
|
||||
_failedTransports.add(transportStyle);
|
||||
}
|
||||
/** not thread safe - dont fail transports and iterate over this at the same time */
|
||||
public Set getFailedTransports() {
|
||||
return (_failedTransports == null ? Collections.EMPTY_SET : _failedTransports);
|
||||
}
|
||||
|
||||
/** when did the sending process begin */
|
||||
public long getSendBegin() { return _sendBegin; }
|
||||
@ -224,10 +249,11 @@ public class OutNetMessage {
|
||||
_log.debug("Discard " + _messageSize + "byte " + _messageType + " message after "
|
||||
+ timeToDiscard);
|
||||
_message = null;
|
||||
_context.statManager().addRateData("outNetMessage.timeToDiscard", timeToDiscard, timeToDiscard);
|
||||
_context.messageStateMonitor().outboundMessageDiscarded();
|
||||
//_context.statManager().addRateData("outNetMessage.timeToDiscard", timeToDiscard, timeToDiscard);
|
||||
//_context.messageStateMonitor().outboundMessageDiscarded();
|
||||
}
|
||||
|
||||
/*
|
||||
public void finalize() throws Throwable {
|
||||
if (_message != null) {
|
||||
if (_log.shouldLog(Log.WARN)) {
|
||||
@ -245,7 +271,7 @@ public class OutNetMessage {
|
||||
_context.messageStateMonitor().outboundMessageFinalized();
|
||||
super.finalize();
|
||||
}
|
||||
|
||||
*/
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append("[OutNetMessage contains ");
|
||||
@ -256,7 +282,8 @@ public class OutNetMessage {
|
||||
buf.append(_message.getClass().getName());
|
||||
}
|
||||
buf.append(" expiring on ").append(new Date(_expiration));
|
||||
buf.append(" failed delivery on transports ").append(_failedTransports);
|
||||
if (_failedTransports != null)
|
||||
buf.append(" failed delivery on transports ").append(_failedTransports);
|
||||
if (_target == null)
|
||||
buf.append(" targetting no one in particular...");
|
||||
else
|
||||
@ -277,25 +304,27 @@ public class OutNetMessage {
|
||||
}
|
||||
|
||||
private void renderTimestamps(StringBuffer buf) {
|
||||
synchronized (_timestamps) {
|
||||
long lastWhen = -1;
|
||||
for (int i = 0; i < _timestampOrder.size(); i++) {
|
||||
String name = (String)_timestampOrder.get(i);
|
||||
Long when = (Long)_timestamps.get(name);
|
||||
buf.append("\t[");
|
||||
long diff = when.longValue() - lastWhen;
|
||||
if ( (lastWhen > 0) && (diff > 500) )
|
||||
buf.append("**");
|
||||
if (lastWhen > 0)
|
||||
buf.append(diff);
|
||||
else
|
||||
buf.append(0);
|
||||
buf.append("ms: \t").append(name);
|
||||
buf.append('=').append(formatDate(when.longValue()));
|
||||
buf.append("]\n");
|
||||
lastWhen = when.longValue();
|
||||
if (_log.shouldLog(Log.DEBUG)) {
|
||||
synchronized (this) {
|
||||
long lastWhen = -1;
|
||||
for (int i = 0; i < _timestampOrder.size(); i++) {
|
||||
String name = (String)_timestampOrder.get(i);
|
||||
Long when = (Long)_timestamps.get(name);
|
||||
buf.append("\t[");
|
||||
long diff = when.longValue() - lastWhen;
|
||||
if ( (lastWhen > 0) && (diff > 500) )
|
||||
buf.append("**");
|
||||
if (lastWhen > 0)
|
||||
buf.append(diff);
|
||||
else
|
||||
buf.append(0);
|
||||
buf.append("ms: \t").append(name);
|
||||
buf.append('=').append(formatDate(when.longValue()));
|
||||
buf.append("]\n");
|
||||
lastWhen = when.longValue();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private final static SimpleDateFormat _fmt = new SimpleDateFormat("HH:mm:ss.SSS");
|
||||
|
@ -49,10 +49,10 @@ public interface ProfileManager {
|
||||
*
|
||||
* @param peer who rejected us
|
||||
* @param responseTimeMs how long it took to get the rejection
|
||||
* @param explicit true if the tunnel request was explicitly rejected, false
|
||||
* if we just didn't get a reply back in time.
|
||||
* @param severity how much the peer doesnt want to participate in the
|
||||
* tunnel (large == more severe)
|
||||
*/
|
||||
void tunnelRejected(Hash peer, long responseTimeMs, boolean explicit);
|
||||
void tunnelRejected(Hash peer, long responseTimeMs, int severity);
|
||||
|
||||
/**
|
||||
* Note that a tunnel that the router is participating in
|
||||
|
@ -32,9 +32,9 @@ import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.RouterInfo;
|
||||
import net.i2p.data.SigningPrivateKey;
|
||||
import net.i2p.data.i2np.GarlicMessage;
|
||||
import net.i2p.data.i2np.TunnelMessage;
|
||||
//import net.i2p.data.i2np.TunnelMessage;
|
||||
import net.i2p.router.message.GarlicMessageHandler;
|
||||
import net.i2p.router.message.TunnelMessageHandler;
|
||||
//import net.i2p.router.message.TunnelMessageHandler;
|
||||
import net.i2p.router.startup.StartupJob;
|
||||
import net.i2p.stat.Rate;
|
||||
import net.i2p.stat.RateStat;
|
||||
@ -67,6 +67,9 @@ public class Router {
|
||||
|
||||
/** let clocks be off by 1 minute */
|
||||
public final static long CLOCK_FUDGE_FACTOR = 1*60*1000;
|
||||
|
||||
/** used to differentiate routerInfo files on different networks */
|
||||
public static final int NETWORK_ID = 1;
|
||||
|
||||
public final static String PROP_INFO_FILENAME = "router.info.location";
|
||||
public final static String PROP_INFO_FILENAME_DEFAULT = "router.info";
|
||||
@ -153,11 +156,33 @@ public class Router {
|
||||
public String getConfigFilename() { return _configFilename; }
|
||||
public void setConfigFilename(String filename) { _configFilename = filename; }
|
||||
|
||||
public String getConfigSetting(String name) { return _config.getProperty(name); }
|
||||
public void setConfigSetting(String name, String value) { _config.setProperty(name, value); }
|
||||
public void removeConfigSetting(String name) { _config.remove(name); }
|
||||
public Set getConfigSettings() { return new HashSet(_config.keySet()); }
|
||||
public Properties getConfigMap() { return _config; }
|
||||
public String getConfigSetting(String name) {
|
||||
synchronized (_config) {
|
||||
return _config.getProperty(name);
|
||||
}
|
||||
}
|
||||
public void setConfigSetting(String name, String value) {
|
||||
synchronized (_config) {
|
||||
_config.setProperty(name, value);
|
||||
}
|
||||
}
|
||||
public void removeConfigSetting(String name) {
|
||||
synchronized (_config) {
|
||||
_config.remove(name);
|
||||
}
|
||||
}
|
||||
public Set getConfigSettings() {
|
||||
synchronized (_config) {
|
||||
return new HashSet(_config.keySet());
|
||||
}
|
||||
}
|
||||
public Properties getConfigMap() {
|
||||
Properties rv = new Properties();
|
||||
synchronized (_config) {
|
||||
rv.putAll(_config);
|
||||
}
|
||||
return rv;
|
||||
}
|
||||
|
||||
public RouterInfo getRouterInfo() { return _routerInfo; }
|
||||
public void setRouterInfo(RouterInfo info) {
|
||||
@ -191,6 +216,9 @@ public class Router {
|
||||
readConfig();
|
||||
|
||||
setupHandlers();
|
||||
_context.messageValidator().startup();
|
||||
_context.tunnelDispatcher().startup();
|
||||
_context.inNetMessagePool().startup();
|
||||
startupQueue();
|
||||
_context.jobQueue().addJob(new CoalesceStatsJob());
|
||||
_context.jobQueue().addJob(new UpdateRoutingKeyModifierJob());
|
||||
@ -234,7 +262,7 @@ public class Router {
|
||||
}
|
||||
|
||||
public boolean isAlive() { return _isAlive; }
|
||||
|
||||
|
||||
/**
|
||||
* Rebuild and republish our routerInfo since something significant
|
||||
* has changed.
|
||||
@ -252,6 +280,7 @@ public class Router {
|
||||
try {
|
||||
ri.setPublished(_context.clock().now());
|
||||
Properties stats = _context.statPublisher().publishStatistics();
|
||||
stats.setProperty(RouterInfo.PROP_NETWORK_ID, NETWORK_ID+"");
|
||||
ri.setOptions(stats);
|
||||
ri.setAddresses(_context.commSystem().createAddresses());
|
||||
SigningPrivateKey key = _context.keyManager().getSigningPrivateKey();
|
||||
@ -302,7 +331,7 @@ public class Router {
|
||||
}
|
||||
System.out.println("INFO: Restarting the router after removing any old identity files");
|
||||
// hard and ugly
|
||||
System.exit(EXIT_GRACEFUL_RESTART);
|
||||
System.exit(EXIT_HARD_RESTART);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -399,7 +428,7 @@ public class Router {
|
||||
|
||||
private void setupHandlers() {
|
||||
_context.inNetMessagePool().registerHandlerJobBuilder(GarlicMessage.MESSAGE_TYPE, new GarlicMessageHandler(_context));
|
||||
_context.inNetMessagePool().registerHandlerJobBuilder(TunnelMessage.MESSAGE_TYPE, new TunnelMessageHandler(_context));
|
||||
//_context.inNetMessagePool().registerHandlerJobBuilder(TunnelMessage.MESSAGE_TYPE, new TunnelMessageHandler(_context));
|
||||
}
|
||||
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
@ -687,11 +716,13 @@ public class Router {
|
||||
try { _context.statPublisher().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the stats manager", t); }
|
||||
try { _context.clientManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the client manager", t); }
|
||||
try { _context.tunnelManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the tunnel manager", t); }
|
||||
try { _context.tunnelDispatcher().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the tunnel dispatcher", t); }
|
||||
try { _context.netDb().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the networkDb", t); }
|
||||
try { _context.commSystem().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the comm system", t); }
|
||||
try { _context.peerManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the peer manager", t); }
|
||||
try { _context.messageRegistry().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the message registry", t); }
|
||||
try { _context.messageValidator().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the message validator", t); }
|
||||
try { _context.inNetMessagePool().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the inbound net pool", t); }
|
||||
try { _sessionKeyPersistenceHelper.shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the session key manager", t); }
|
||||
RouterContext.listContexts().remove(_context);
|
||||
dumpStats();
|
||||
|
@ -22,7 +22,8 @@ import net.i2p.router.transport.CommSystemFacadeImpl;
|
||||
import net.i2p.router.transport.FIFOBandwidthLimiter;
|
||||
import net.i2p.router.transport.OutboundMessageRegistry;
|
||||
import net.i2p.router.transport.VMCommSystem;
|
||||
import net.i2p.router.tunnelmanager.PoolingTunnelManagerFacade;
|
||||
import net.i2p.router.tunnel.pool.TunnelPoolManager;
|
||||
import net.i2p.router.tunnel.TunnelDispatcher;
|
||||
|
||||
/**
|
||||
* Build off the core I2P context to provide a root for a router instance to
|
||||
@ -50,6 +51,7 @@ public class RouterContext extends I2PAppContext {
|
||||
private ProfileManager _profileManager;
|
||||
private FIFOBandwidthLimiter _bandwidthLimiter;
|
||||
private TunnelManagerFacade _tunnelManager;
|
||||
private TunnelDispatcher _tunnelDispatcher;
|
||||
private StatisticsManager _statPublisher;
|
||||
private Shitlist _shitlist;
|
||||
private MessageValidator _messageValidator;
|
||||
@ -103,7 +105,8 @@ public class RouterContext extends I2PAppContext {
|
||||
_peerManagerFacade = new PeerManagerFacadeImpl(this);
|
||||
_profileManager = new ProfileManagerImpl(this);
|
||||
_bandwidthLimiter = new FIFOBandwidthLimiter(this);
|
||||
_tunnelManager = new PoolingTunnelManagerFacade(this);
|
||||
_tunnelManager = new TunnelPoolManager(this);
|
||||
_tunnelDispatcher = new TunnelDispatcher(this);
|
||||
_statPublisher = new StatisticsManager(this);
|
||||
_shitlist = new Shitlist(this);
|
||||
_messageValidator = new MessageValidator(this);
|
||||
@ -215,6 +218,10 @@ public class RouterContext extends I2PAppContext {
|
||||
* Any configuration for the tunnels is rooted from the context's properties
|
||||
*/
|
||||
public TunnelManagerFacade tunnelManager() { return _tunnelManager; }
|
||||
/**
|
||||
* Handle tunnel messages, as well as coordinate the gateways
|
||||
*/
|
||||
public TunnelDispatcher tunnelDispatcher() { return _tunnelDispatcher; }
|
||||
/**
|
||||
* If the router is configured to, gather up some particularly tasty morsels
|
||||
* regarding the stats managed and offer to publish them into the routerInfo.
|
||||
|
@ -22,9 +22,10 @@ public interface RouterThrottle {
|
||||
/**
|
||||
* Should we accept the request to participate in the given tunnel,
|
||||
* taking into account our current load and bandwidth usage commitments?
|
||||
*
|
||||
*
|
||||
* @return 0 if it should be accepted, higher values for more severe rejection
|
||||
*/
|
||||
public boolean acceptTunnelRequest(TunnelCreateMessage msg);
|
||||
public int acceptTunnelRequest(TunnelCreateMessage msg);
|
||||
/**
|
||||
* Should we accept the netDb lookup message, replying either with the
|
||||
* value or some closer peers, or should we simply drop it due to overload?
|
||||
|
@ -2,6 +2,7 @@ package net.i2p.router;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.i2np.TunnelCreateMessage;
|
||||
import net.i2p.router.peermanager.TunnelHistory;
|
||||
import net.i2p.stat.Rate;
|
||||
import net.i2p.stat.RateStat;
|
||||
import net.i2p.util.Log;
|
||||
@ -33,6 +34,9 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
private static final String PROP_DEFAULT_KBPS_THROTTLE = "router.defaultKBpsThrottle";
|
||||
private static final String PROP_BANDWIDTH_SHARE_PERCENTAGE = "router.sharePercentage";
|
||||
|
||||
/** tunnel acceptance */
|
||||
public static final int TUNNEL_ACCEPT = 0;
|
||||
|
||||
public RouterThrottleImpl(RouterContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(RouterThrottleImpl.class);
|
||||
@ -71,7 +75,8 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
public boolean acceptTunnelRequest(TunnelCreateMessage msg) {
|
||||
|
||||
public int acceptTunnelRequest(TunnelCreateMessage msg) {
|
||||
long lag = _context.jobQueue().getMaxLag();
|
||||
RateStat rs = _context.statManager().getRate("router.throttleNetworkCause");
|
||||
Rate r = null;
|
||||
@ -84,7 +89,7 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
+ " since there have been " + throttleEvents
|
||||
+ " throttle events in the last 15 minutes or so");
|
||||
_context.statManager().addRateData("router.throttleTunnelCause", lag, lag);
|
||||
return false;
|
||||
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
|
||||
}
|
||||
|
||||
rs = _context.statManager().getRate("transport.sendProcessingTime");
|
||||
@ -97,7 +102,7 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
_log.debug("Refusing tunnel request with the job lag of " + lag
|
||||
+ "since the 10 minute message processing time is too slow (" + processTime + ")");
|
||||
_context.statManager().addRateData("router.throttleTunnelProcessingTime10m", (long)processTime, (long)processTime);
|
||||
return false;
|
||||
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
|
||||
}
|
||||
if (rs != null)
|
||||
r = rs.getRate(60*1000);
|
||||
@ -107,7 +112,7 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
_log.debug("Refusing tunnel request with the job lag of " + lag
|
||||
+ "since the 1 minute message processing time is too slow (" + processTime + ")");
|
||||
_context.statManager().addRateData("router.throttleTunnelProcessingTime1m", (long)processTime, (long)processTime);
|
||||
return false;
|
||||
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
|
||||
}
|
||||
|
||||
int numTunnels = _context.tunnelManager().getParticipatingCount();
|
||||
@ -115,7 +120,7 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
if (_context.getProperty(Router.PROP_SHUTDOWN_IN_PROGRESS) != null) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Refusing tunnel request since we are shutting down ASAP");
|
||||
return false;
|
||||
return TunnelHistory.TUNNEL_REJECT_CRIT;
|
||||
}
|
||||
|
||||
if (numTunnels > getMinThrottleTunnels()) {
|
||||
@ -127,6 +132,9 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
avg = avgTunnels.getAverageValue();
|
||||
else
|
||||
avg = avgTunnels.getLifetimeAverageValue();
|
||||
int min = getMinThrottleTunnels();
|
||||
if (avg < min)
|
||||
avg = min;
|
||||
if ( (avg > 0) && (avg*growthFactor < numTunnels) ) {
|
||||
// we're accelerating, lets try not to take on too much too fast
|
||||
double probAccept = (avg*growthFactor) / numTunnels;
|
||||
@ -141,7 +149,7 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
_log.warn("Probabalistically refusing tunnel request (avg=" + avg
|
||||
+ " current=" + numTunnels + ")");
|
||||
_context.statManager().addRateData("router.throttleTunnelProbTooFast", (long)(numTunnels-avg), 0);
|
||||
return false;
|
||||
return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
|
||||
}
|
||||
} else {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
@ -160,6 +168,9 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
else
|
||||
avg60m = tunnelTestTime60m.getLifetimeAverageValue();
|
||||
|
||||
if (avg60m < 2000)
|
||||
avg60m = 2000; // minimum before complaining
|
||||
|
||||
if ( (avg60m > 0) && (avg10m > avg60m * growthFactor) ) {
|
||||
double probAccept = (avg60m*growthFactor)/avg10m;
|
||||
int v = _context.random().nextInt(100);
|
||||
@ -173,7 +184,7 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
_log.warn("Probabalistically refusing tunnel request (test time avg 10m=" + avg10m
|
||||
+ " 60m=" + avg60m + ")");
|
||||
_context.statManager().addRateData("router.throttleTunnelProbTestSlow", (long)(avg10m-avg60m), 0);
|
||||
return false;
|
||||
return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -188,7 +199,7 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
_log.warn("Refusing tunnel request since we are already participating in "
|
||||
+ numTunnels + " (our max is " + max + ")");
|
||||
_context.statManager().addRateData("router.throttleTunnelMaxExceeded", numTunnels, 0);
|
||||
return false;
|
||||
return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
|
||||
}
|
||||
} catch (NumberFormatException nfe) {
|
||||
// no default, ignore it
|
||||
@ -197,23 +208,23 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
|
||||
// ok, we're not hosed, but can we handle the bandwidth requirements
|
||||
// of another tunnel?
|
||||
rs = _context.statManager().getRate("tunnel.participatingBytesProcessed");
|
||||
rs = _context.statManager().getRate("tunnel.participatingMessageCount");
|
||||
r = null;
|
||||
if (rs != null)
|
||||
r = rs.getRate(10*60*1000);
|
||||
double bytesAllocated = r.getCurrentTotalValue();
|
||||
double bytesAllocated = (r != null ? r.getCurrentTotalValue() * 1024 : 0);
|
||||
|
||||
if (!allowTunnel(bytesAllocated, numTunnels)) {
|
||||
_context.statManager().addRateData("router.throttleTunnelBandwidthExceeded", (long)bytesAllocated, 0);
|
||||
return false;
|
||||
return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
|
||||
}
|
||||
_context.statManager().addRateData("tunnel.bytesAllocatedAtAccept", (long)bytesAllocated, msg.getTunnelDurationSeconds()*1000);
|
||||
_context.statManager().addRateData("tunnel.bytesAllocatedAtAccept", (long)bytesAllocated, msg.getDurationSeconds()*1000);
|
||||
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Accepting a new tunnel request (now allocating " + bytesAllocated + " bytes across " + numTunnels
|
||||
+ " tunnels with lag of " + lag + " and " + throttleEvents + " throttle events)");
|
||||
return true;
|
||||
return TUNNEL_ACCEPT;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -320,9 +331,9 @@ class RouterThrottleImpl implements RouterThrottle {
|
||||
|
||||
private double getTunnelGrowthFactor() {
|
||||
try {
|
||||
return Double.parseDouble(_context.getProperty("router.tunnelGrowthFactor", "1.5"));
|
||||
return Double.parseDouble(_context.getProperty("router.tunnelGrowthFactor", "3.0"));
|
||||
} catch (NumberFormatException nfe) {
|
||||
return 1.5;
|
||||
return 3.0;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
|
||||
*
|
||||
*/
|
||||
public class RouterVersion {
|
||||
public final static String ID = "$Revision: 1.137 $ $Date: 2005/01/23 03:22:11 $";
|
||||
public final static String VERSION = "0.4.2.6";
|
||||
public final static long BUILD = 7;
|
||||
public final static String ID = "$Revision: 1.137.2.12 $ $Date: 2005/02/16 13:59:59 $";
|
||||
public final static String VERSION = "0.5-pre";
|
||||
public final static long BUILD = 12;
|
||||
public static void main(String args[]) {
|
||||
System.out.println("I2P Router version: " + VERSION);
|
||||
System.out.println("Router ID: " + RouterVersion.ID);
|
||||
|
@ -82,7 +82,7 @@ public class Shitlist {
|
||||
}
|
||||
|
||||
//_context.netDb().fail(peer);
|
||||
_context.tunnelManager().peerFailed(peer);
|
||||
//_context.tunnelManager().peerFailed(peer);
|
||||
_context.messageRegistry().peerFailed(peer);
|
||||
return wasAlready;
|
||||
}
|
||||
|
@ -102,56 +102,33 @@ public class StatisticsManager implements Service {
|
||||
stats.putAll(_context.profileManager().summarizePeers(_publishedStats));
|
||||
|
||||
includeThroughput(stats);
|
||||
includeRate("router.duplicateMessageId", stats, new long[] { 24*60*60*1000 });
|
||||
includeRate("tunnel.duplicateIV", stats, new long[] { 24*60*60*1000 });
|
||||
includeRate("tunnel.fragmentedComplete", stats, new long[] { 10*60*1000, 3*60*60*1000 });
|
||||
includeRate("tunnel.fragmentedDropped", stats, new long[] { 10*60*1000, 3*60*60*1000 });
|
||||
includeRate("tunnel.fullFragments", stats, new long[] { 10*60*1000, 3*60*60*1000 });
|
||||
includeRate("tunnel.smallFragments", stats, new long[] { 10*60*1000, 3*60*60*1000 });
|
||||
includeRate("tunnel.testFailedTime", stats, new long[] { 60*60*1000, 3*60*60*1000 });
|
||||
includeRate("tunnel.dispatchOutboundTime", stats, new long[] { 60*60*1000 });
|
||||
includeRate("tunnel.dispatchGatewayTime", stats, new long[] { 60*60*1000 });
|
||||
includeRate("tunnel.dispatchDataTime", stats, new long[] { 60*60*1000 });
|
||||
includeRate("tunnel.buildFailure", stats, new long[] { 10*60*1000, 60*60*1000 });
|
||||
includeRate("tunnel.buildSuccess", stats, new long[] { 10*60*1000, 60*60*1000 });
|
||||
|
||||
includeRate("router.throttleTunnelProbTestSlow", stats, new long[] { 60*60*1000 });
|
||||
includeRate("router.throttleTunnelProbTooFast", stats, new long[] { 60*60*1000 });
|
||||
includeRate("router.throttleTunnelProcessingTime1m", stats, new long[] { 60*60*1000 });
|
||||
|
||||
includeRate("clock.skew", stats, new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*1000 });
|
||||
|
||||
includeRate("transport.sendProcessingTime", stats, new long[] { 60*60*1000 });
|
||||
includeRate("tcp.probabalisticDropQueueSize", stats, new long[] { 60*1000l, 60*60*1000l });
|
||||
//includeRate("tcp.queueSize", stats);
|
||||
//includeRate("jobQueue.jobLag", stats, new long[] { 60*1000, 60*60*1000 });
|
||||
//includeRate("jobQueue.jobRun", stats, new long[] { 60*1000, 60*60*1000 });
|
||||
includeRate("jobQueue.jobRunSlow", stats, new long[] { 10*60*1000l, 60*60*1000l });
|
||||
includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*60*1000 });
|
||||
//includeRate("crypto.garlic.decryptFail", stats, new long[] { 60*60*1000, 24*60*60*1000 });
|
||||
includeRate("tunnel.unknownTunnelTimeLeft", stats, new long[] { 60*60*1000 });
|
||||
//includeRate("jobQueue.readyJobs", stats, new long[] { 60*60*1000 });
|
||||
//includeRate("jobQueue.droppedJobs", stats, new long[] { 60*60*1000, 24*60*60*1000 });
|
||||
//includeRate("inNetPool.dropped", stats, new long[] { 60*60*1000, 24*60*60*1000 });
|
||||
includeRate("tunnel.participatingTunnels", stats, new long[] { 5*60*1000, 60*60*1000 });
|
||||
includeRate("tunnel.participatingBytesProcessed", stats, new long[] { 10*60*1000 });
|
||||
includeRate("tunnel.participatingBytesProcessedActive", stats, new long[] { 10*60*1000 });
|
||||
includeRate("tunnel.testSuccessTime", stats, new long[] { 60*60*1000l, 24*60*60*1000l });
|
||||
//includeRate("tunnel.outboundMessagesProcessed", stats, new long[] { 10*60*1000, 60*60*1000 });
|
||||
//includeRate("tunnel.inboundMessagesProcessed", stats, new long[] { 10*60*1000, 60*60*1000 });
|
||||
//includeRate("tunnel.participatingMessagesProcessed", stats, new long[] { 10*60*1000, 60*60*1000 });
|
||||
//includeRate("tunnel.participatingMessagesProcessedActive", stats, new long[] { 10*60*1000, 60*60*1000 });
|
||||
//includeRate("tunnel.expiredAfterAcceptTime", stats, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
|
||||
includeRate("tunnel.bytesAllocatedAtAccept", stats, new long[] { 60*60*1000l });
|
||||
includeRate("netDb.lookupsReceived", stats, new long[] { 60*60*1000 });
|
||||
//includeRate("netDb.lookupsHandled", stats, new long[] { 60*60*1000 });
|
||||
includeRate("netDb.lookupsMatched", stats, new long[] { 60*60*1000 });
|
||||
//includeRate("netDb.storeSent", stats, new long[] { 5*60*1000, 60*60*1000 });
|
||||
includeRate("netDb.successPeers", stats, new long[] { 60*60*1000 });
|
||||
//includeRate("netDb.failedPeers", stats, new long[] { 60*60*1000 });
|
||||
//includeRate("router.throttleNetDbDoSSend", stats, new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
|
||||
//includeRate("router.throttleNetDbDoS", stats, new long[] { 10*60*1000, 60*60*1000 });
|
||||
//includeRate("netDb.searchCount", stats, new long[] { 3*60*60*1000});
|
||||
//includeRate("netDb.searchMessageCount", stats, new long[] { 5*60*1000, 10*60*1000, 60*60*1000 });
|
||||
//includeRate("inNetMessage.timeToDiscard", stats, new long[] { 5*60*1000, 10*60*1000, 60*60*1000 });
|
||||
//includeRate("outNetMessage.timeToDiscard", stats, new long[] { 5*60*1000, 10*60*1000, 60*60*1000 });
|
||||
//includeRate("router.throttleNetworkCause", stats, new long[] { 10*60*1000, 60*60*1000 });
|
||||
//includeRate("transport.receiveMessageSize", stats, new long[] { 5*60*1000, 60*60*1000 });
|
||||
//includeRate("transport.sendMessageSize", stats, new long[] { 5*60*1000, 60*60*1000 });
|
||||
//includeRate("transport.sendMessageSmall", stats, new long[] { 5*60*1000, 60*60*1000 });
|
||||
//includeRate("transport.sendMessageMedium", stats, new long[] { 5*60*1000, 60*60*1000 });
|
||||
//includeRate("transport.sendMessageLarge", stats, new long[] { 5*60*1000, 60*60*1000 });
|
||||
//includeRate("transport.receiveMessageSmall", stats, new long[] { 5*60*1000, 60*60*1000 });
|
||||
//includeRate("transport.receiveMessageMedium", stats, new long[] { 5*60*1000, 60*60*1000 });
|
||||
//includeRate("transport.receiveMessageLarge", stats, new long[] { 5*60*1000, 60*60*1000 });
|
||||
includeRate("client.sendAckTime", stats, new long[] { 60*60*1000 }, true);
|
||||
includeRate("stream.con.sendDuplicateSize", stats, new long[] { 60*60*1000 });
|
||||
includeRate("stream.con.receiveDuplicateSize", stats, new long[] { 60*60*1000 });
|
||||
//includeRate("client.sendsPerFailure", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
|
||||
//includeRate("client.timeoutCongestionTunnel", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
|
||||
//includeRate("client.timeoutCongestionMessage", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
|
||||
//includeRate("client.timeoutCongestionInbound", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
|
||||
stats.setProperty("stat_uptime", DataHelper.formatDuration(_context.router().getUptime()));
|
||||
stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");
|
||||
_log.debug("Publishing peer rankings");
|
||||
@ -223,6 +200,7 @@ public class StatisticsManager implements Service {
|
||||
double peakFrequency = rate.getExtremeEventCount();
|
||||
buf.append(num(avgFrequency)).append(';');
|
||||
buf.append(num(rate.getExtremeEventCount())).append(';');
|
||||
buf.append(num((double)rate.getLifetimeEventCount())).append(';');
|
||||
}
|
||||
}
|
||||
return buf.toString();
|
||||
|
@ -8,392 +8,45 @@ package net.i2p.router;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.util.Date;
|
||||
import java.util.HashSet;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.DataStructureImpl;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.SessionKey;
|
||||
import net.i2p.data.SigningPrivateKey;
|
||||
import net.i2p.data.SigningPublicKey;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.data.i2np.TunnelConfigurationSessionKey;
|
||||
import net.i2p.data.i2np.TunnelSessionKey;
|
||||
import net.i2p.data.i2np.TunnelSigningPrivateKey;
|
||||
import net.i2p.data.i2np.TunnelSigningPublicKey;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Defines the information associated with a tunnel
|
||||
*/
|
||||
public class TunnelInfo extends DataStructureImpl {
|
||||
private I2PAppContext _context;
|
||||
private static Log _log;
|
||||
private TunnelId _id;
|
||||
private Hash _nextHop;
|
||||
private TunnelId _nextHopId;
|
||||
private Hash _thisHop;
|
||||
private TunnelInfo _nextHopInfo;
|
||||
private TunnelConfigurationSessionKey _configurationKey;
|
||||
private TunnelSigningPublicKey _verificationKey;
|
||||
private TunnelSigningPrivateKey _signingKey;
|
||||
private TunnelSessionKey _encryptionKey;
|
||||
private Destination _destination;
|
||||
private Properties _options;
|
||||
private TunnelSettings _settings;
|
||||
private long _created;
|
||||
private long _lastTested;
|
||||
private boolean _ready;
|
||||
private boolean _wasEverReady;
|
||||
private int _messagesProcessed;
|
||||
private int _tunnelFailures;
|
||||
private long _bytesProcessed;
|
||||
|
||||
public TunnelInfo(I2PAppContext context) {
|
||||
_context = context;
|
||||
if (_log == null)
|
||||
_log = context.logManager().getLog(TunnelInfo.class);
|
||||
setTunnelId(null);
|
||||
setThisHop(null);
|
||||
setNextHop(null);
|
||||
setNextHopId(null);
|
||||
setNextHopInfo(null);
|
||||
_configurationKey = null;
|
||||
_verificationKey = null;
|
||||
_signingKey = null;
|
||||
_encryptionKey = null;
|
||||
setDestination(null);
|
||||
setSettings(null);
|
||||
_options = new Properties();
|
||||
_ready = false;
|
||||
_wasEverReady = false;
|
||||
_created = _context.clock().now();
|
||||
_lastTested = -1;
|
||||
_messagesProcessed = 0;
|
||||
_tunnelFailures = 0;
|
||||
_bytesProcessed = 0;
|
||||
}
|
||||
|
||||
public TunnelId getTunnelId() { return _id; }
|
||||
public void setTunnelId(TunnelId id) { _id = id; }
|
||||
|
||||
public Hash getNextHop() { return _nextHop; }
|
||||
public void setNextHop(Hash nextHopRouterIdentity) { _nextHop = nextHopRouterIdentity; }
|
||||
|
||||
public TunnelId getNextHopId() { return _nextHopId; }
|
||||
public void setNextHopId(TunnelId id) { _nextHopId = id; }
|
||||
|
||||
public Hash getThisHop() { return _thisHop; }
|
||||
public void setThisHop(Hash thisHopRouterIdentity) { _thisHop = thisHopRouterIdentity; }
|
||||
|
||||
public TunnelInfo getNextHopInfo() { return _nextHopInfo; }
|
||||
public void setNextHopInfo(TunnelInfo info) { _nextHopInfo = info; }
|
||||
|
||||
public TunnelConfigurationSessionKey getConfigurationKey() { return _configurationKey; }
|
||||
public void setConfigurationKey(TunnelConfigurationSessionKey key) { _configurationKey = key; }
|
||||
public void setConfigurationKey(SessionKey key) {
|
||||
TunnelConfigurationSessionKey tk = new TunnelConfigurationSessionKey();
|
||||
tk.setKey(key);
|
||||
_configurationKey = tk;
|
||||
}
|
||||
|
||||
public TunnelSigningPublicKey getVerificationKey() { return _verificationKey; }
|
||||
public void setVerificationKey(TunnelSigningPublicKey key) { _verificationKey = key; }
|
||||
public void setVerificationKey(SigningPublicKey key) {
|
||||
TunnelSigningPublicKey tk = new TunnelSigningPublicKey();
|
||||
tk.setKey(key);
|
||||
_verificationKey = tk;
|
||||
}
|
||||
|
||||
public TunnelSigningPrivateKey getSigningKey() { return _signingKey; }
|
||||
public void setSigningKey(TunnelSigningPrivateKey key) { _signingKey = key; }
|
||||
public void setSigningKey(SigningPrivateKey key) {
|
||||
TunnelSigningPrivateKey tk = new TunnelSigningPrivateKey();
|
||||
tk.setKey(key);
|
||||
_signingKey = tk;
|
||||
}
|
||||
|
||||
public TunnelSessionKey getEncryptionKey() { return _encryptionKey; }
|
||||
public void setEncryptionKey(TunnelSessionKey key) { _encryptionKey = key; }
|
||||
public void setEncryptionKey(SessionKey key) {
|
||||
TunnelSessionKey tk = new TunnelSessionKey();
|
||||
tk.setKey(key);
|
||||
_encryptionKey = tk;
|
||||
}
|
||||
|
||||
public Destination getDestination() { return _destination; }
|
||||
public void setDestination(Destination dest) { _destination = dest; }
|
||||
|
||||
public String getProperty(String key) { return _options.getProperty(key); }
|
||||
public void setProperty(String key, String val) { _options.setProperty(key, val); }
|
||||
public void clearProperties() { _options.clear(); }
|
||||
public Set getPropertyNames() { return new HashSet(_options.keySet()); }
|
||||
|
||||
public TunnelSettings getSettings() { return _settings; }
|
||||
public void setSettings(TunnelSettings settings) { _settings = settings; }
|
||||
public interface TunnelInfo {
|
||||
/** how many peers are there in the tunnel (including the creator)? */
|
||||
public int getLength();
|
||||
|
||||
/**
|
||||
* Have all of the routers in this tunnel confirmed participation, and we're ok to
|
||||
* start sending messages through this tunnel?
|
||||
*/
|
||||
public boolean getIsReady() { return _ready; }
|
||||
public void setIsReady(boolean ready) {
|
||||
_ready = ready;
|
||||
if (ready)
|
||||
_wasEverReady = true;
|
||||
}
|
||||
/**
|
||||
* true if this tunnel was ever working (aka rebuildable)
|
||||
* retrieve the tunnelId that the given hop receives messages on.
|
||||
* the gateway is hop 0.
|
||||
*
|
||||
*/
|
||||
public boolean getWasEverReady() { return _wasEverReady; }
|
||||
|
||||
public long getCreated() { return _created; }
|
||||
|
||||
/** when was the peer last tested (or -1 if never)? */
|
||||
public long getLastTested() { return _lastTested; }
|
||||
public void setLastTested(long when) { _lastTested = when; }
|
||||
|
||||
public TunnelId getReceiveTunnelId(int hop);
|
||||
/**
|
||||
* Number of hops left in the tunnel (including this one)
|
||||
* retrieve the tunnelId that the given hop sends messages on.
|
||||
* the gateway is hop 0.
|
||||
*
|
||||
*/
|
||||
public final int getLength() {
|
||||
int len = 0;
|
||||
TunnelInfo info = this;
|
||||
while (info != null) {
|
||||
info = info.getNextHopInfo();
|
||||
len++;
|
||||
}
|
||||
return len;
|
||||
}
|
||||
public TunnelId getSendTunnelId(int hop);
|
||||
|
||||
/** how many messages have passed through this tunnel in its lifetime? */
|
||||
public int getMessagesProcessed() {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Tunnel " + _id.getTunnelId() + " processed " + _messagesProcessed + " messages");
|
||||
return _messagesProcessed;
|
||||
}
|
||||
/** we have just processed a message for this tunnel */
|
||||
public void messageProcessed(int size) {
|
||||
_messagesProcessed++;
|
||||
_bytesProcessed += size;
|
||||
}
|
||||
/** how many bytes have been pumped through this tunnel in its lifetime? */
|
||||
public long getBytesProcessed() { return _bytesProcessed; }
|
||||
/** retrieve the peer at the given hop. the gateway is hop 0 */
|
||||
public Hash getPeer(int hop);
|
||||
|
||||
/** is this an inbound tunnel? */
|
||||
public boolean isInbound();
|
||||
|
||||
/** if this is a client tunnel, what destination is it for? */
|
||||
public Hash getDestination();
|
||||
|
||||
public long getExpiration();
|
||||
/**
|
||||
* the tunnel was (potentially) unable to pass a message through.
|
||||
*
|
||||
* @return the new number of tunnel failures ever for this tunnel
|
||||
* take note that the tunnel was able to measurably Do Good
|
||||
* in the given time
|
||||
*/
|
||||
public int incrementFailures() { return ++_tunnelFailures; }
|
||||
public void testSuccessful(int responseTime);
|
||||
|
||||
public void readBytes(InputStream in) throws DataFormatException, IOException {
|
||||
_options = DataHelper.readProperties(in);
|
||||
Boolean includeDest = DataHelper.readBoolean(in);
|
||||
if (includeDest.booleanValue()) {
|
||||
_destination = new Destination();
|
||||
_destination.readBytes(in);
|
||||
} else {
|
||||
_destination = null;
|
||||
}
|
||||
Boolean includeThis = DataHelper.readBoolean(in);
|
||||
if (includeThis.booleanValue()) {
|
||||
_thisHop = new Hash();
|
||||
_thisHop.readBytes(in);
|
||||
} else {
|
||||
_thisHop = null;
|
||||
}
|
||||
Boolean includeNext = DataHelper.readBoolean(in);
|
||||
if (includeNext.booleanValue()) {
|
||||
_nextHop = new Hash();
|
||||
_nextHop.readBytes(in);
|
||||
_nextHopId = new TunnelId();
|
||||
_nextHopId.readBytes(in);
|
||||
} else {
|
||||
_nextHop = null;
|
||||
}
|
||||
Boolean includeNextInfo = DataHelper.readBoolean(in);
|
||||
if (includeNextInfo.booleanValue()) {
|
||||
_nextHopInfo = new TunnelInfo(_context);
|
||||
_nextHopInfo.readBytes(in);
|
||||
} else {
|
||||
_nextHopInfo = null;
|
||||
}
|
||||
_id = new TunnelId();
|
||||
_id.readBytes(in);
|
||||
Boolean includeConfigKey = DataHelper.readBoolean(in);
|
||||
if (includeConfigKey.booleanValue()) {
|
||||
_configurationKey = new TunnelConfigurationSessionKey();
|
||||
_configurationKey.readBytes(in);
|
||||
} else {
|
||||
_configurationKey = null;
|
||||
}
|
||||
Boolean includeEncryptionKey = DataHelper.readBoolean(in);
|
||||
if (includeEncryptionKey.booleanValue()) {
|
||||
_encryptionKey = new TunnelSessionKey();
|
||||
_encryptionKey.readBytes(in);
|
||||
} else {
|
||||
_encryptionKey = null;
|
||||
}
|
||||
Boolean includeSigningKey = DataHelper.readBoolean(in);
|
||||
if (includeSigningKey.booleanValue()) {
|
||||
_signingKey = new TunnelSigningPrivateKey();
|
||||
_signingKey.readBytes(in);
|
||||
} else {
|
||||
_signingKey = null;
|
||||
}
|
||||
Boolean includeVerificationKey = DataHelper.readBoolean(in);
|
||||
if (includeVerificationKey.booleanValue()) {
|
||||
_verificationKey = new TunnelSigningPublicKey();
|
||||
_verificationKey.readBytes(in);
|
||||
} else {
|
||||
_verificationKey = null;
|
||||
}
|
||||
_settings = new TunnelSettings(_context);
|
||||
_settings.readBytes(in);
|
||||
Boolean ready = DataHelper.readBoolean(in);
|
||||
if (ready != null)
|
||||
setIsReady(ready.booleanValue());
|
||||
}
|
||||
|
||||
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
|
||||
if (_id == null) throw new DataFormatException("Invalid tunnel ID: " + _id);
|
||||
if (_options == null) throw new DataFormatException("Options are null");
|
||||
if (_settings == null) throw new DataFormatException("Settings are null");
|
||||
// everything else is optional in the serialization
|
||||
|
||||
DataHelper.writeProperties(out, _options);
|
||||
if (_destination != null) {
|
||||
DataHelper.writeBoolean(out, Boolean.TRUE);
|
||||
_destination.writeBytes(out);
|
||||
} else {
|
||||
DataHelper.writeBoolean(out, Boolean.FALSE);
|
||||
}
|
||||
if (_thisHop != null) {
|
||||
DataHelper.writeBoolean(out, Boolean.TRUE);
|
||||
_thisHop.writeBytes(out);
|
||||
} else {
|
||||
DataHelper.writeBoolean(out, Boolean.FALSE);
|
||||
}
|
||||
if (_nextHop != null) {
|
||||
DataHelper.writeBoolean(out, Boolean.TRUE);
|
||||
_nextHop.writeBytes(out);
|
||||
_nextHopId.writeBytes(out);
|
||||
} else {
|
||||
DataHelper.writeBoolean(out, Boolean.FALSE);
|
||||
}
|
||||
if (_nextHopInfo != null) {
|
||||
DataHelper.writeBoolean(out, Boolean.TRUE);
|
||||
_nextHopInfo.writeBytes(out);
|
||||
} else {
|
||||
DataHelper.writeBoolean(out, Boolean.FALSE);
|
||||
}
|
||||
_id.writeBytes(out);
|
||||
if (_configurationKey != null) {
|
||||
DataHelper.writeBoolean(out, Boolean.TRUE);
|
||||
_configurationKey.writeBytes(out);
|
||||
} else {
|
||||
DataHelper.writeBoolean(out, Boolean.FALSE);
|
||||
}
|
||||
if (_encryptionKey != null) {
|
||||
DataHelper.writeBoolean(out, Boolean.TRUE);
|
||||
_encryptionKey.writeBytes(out);
|
||||
} else {
|
||||
DataHelper.writeBoolean(out, Boolean.FALSE);
|
||||
}
|
||||
if (_signingKey != null) {
|
||||
DataHelper.writeBoolean(out, Boolean.TRUE);
|
||||
_signingKey.writeBytes(out);
|
||||
} else {
|
||||
DataHelper.writeBoolean(out, Boolean.FALSE);
|
||||
}
|
||||
if (_verificationKey != null) {
|
||||
DataHelper.writeBoolean(out, Boolean.TRUE);
|
||||
_verificationKey.writeBytes(out);
|
||||
} else {
|
||||
DataHelper.writeBoolean(out, Boolean.FALSE);
|
||||
}
|
||||
_settings.writeBytes(out);
|
||||
DataHelper.writeBoolean(out, new Boolean(_ready));
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[Tunnel ").append(_id.getTunnelId());
|
||||
TunnelInfo cur = this;
|
||||
int i = 0;
|
||||
while (cur != null) {
|
||||
buf.append("\n*Hop ").append(i).append(": ").append(cur.getThisHop());
|
||||
//if (cur.getEncryptionKey() != null)
|
||||
// buf.append("\n Encryption key: ").append(cur.getEncryptionKey());
|
||||
//if (cur.getSigningKey() != null)
|
||||
// buf.append("\n Signing key: ").append(cur.getSigningKey());
|
||||
//if (cur.getVerificationKey() != null)
|
||||
// buf.append("\n Verification key: ").append(cur.getVerificationKey());
|
||||
if (cur.getDestination() != null)
|
||||
buf.append("\n Destination: ").append(cur.getDestination().calculateHash().toBase64());
|
||||
if (cur.getNextHop() != null)
|
||||
buf.append("\n Next: ").append(cur.getNextHop());
|
||||
if (cur.getNextHop() != null)
|
||||
buf.append("\n NextId: ").append(cur.getNextHopId());
|
||||
if (cur.getSettings() == null)
|
||||
buf.append("\n Expiration: ").append("none");
|
||||
else
|
||||
buf.append("\n Expiration: ").append(new Date(cur.getSettings().getExpiration()));
|
||||
//buf.append("\n Ready: ").append(getIsReady());
|
||||
cur = cur.getNextHopInfo();
|
||||
i++;
|
||||
}
|
||||
buf.append("]");
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
int rv = 0;
|
||||
rv = 7*rv + DataHelper.hashCode(_options);
|
||||
rv = 7*rv + DataHelper.hashCode(_destination);
|
||||
rv = 7*rv + DataHelper.hashCode(_nextHop);
|
||||
rv = 7*rv + DataHelper.hashCode(_nextHopId);
|
||||
rv = 7*rv + DataHelper.hashCode(_thisHop);
|
||||
rv = 7*rv + DataHelper.hashCode(_id);
|
||||
rv = 7*rv + DataHelper.hashCode(_configurationKey);
|
||||
rv = 7*rv + DataHelper.hashCode(_encryptionKey);
|
||||
rv = 7*rv + DataHelper.hashCode(_signingKey);
|
||||
rv = 7*rv + DataHelper.hashCode(_verificationKey);
|
||||
rv = 7*rv + DataHelper.hashCode(_settings);
|
||||
rv = 7*rv + (_ready ? 0 : 1);
|
||||
return rv;
|
||||
}
|
||||
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj != null) && (obj instanceof TunnelInfo) ) {
|
||||
TunnelInfo info = (TunnelInfo)obj;
|
||||
return DataHelper.eq(getConfigurationKey(), info.getConfigurationKey()) &&
|
||||
DataHelper.eq(getDestination(), info.getDestination()) &&
|
||||
getIsReady() == info.getIsReady() &&
|
||||
DataHelper.eq(getEncryptionKey(), info.getEncryptionKey()) &&
|
||||
DataHelper.eq(getNextHop(), info.getNextHop()) &&
|
||||
DataHelper.eq(getNextHopId(), info.getNextHopId()) &&
|
||||
DataHelper.eq(getNextHopInfo(), info.getNextHopInfo()) &&
|
||||
DataHelper.eq(getSettings(), info.getSettings()) &&
|
||||
DataHelper.eq(getSigningKey(), info.getSigningKey()) &&
|
||||
DataHelper.eq(getThisHop(), info.getThisHop()) &&
|
||||
DataHelper.eq(getTunnelId(), info.getTunnelId()) &&
|
||||
DataHelper.eq(getVerificationKey(), info.getVerificationKey()) &&
|
||||
DataHelper.eq(_options, info._options);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
public long getProcessedMessagesCount();
|
||||
}
|
||||
|
@ -19,42 +19,22 @@ import net.i2p.data.TunnelId;
|
||||
*
|
||||
*/
|
||||
public interface TunnelManagerFacade extends Service {
|
||||
|
||||
/**
|
||||
* React to a request to join the specified tunnel.
|
||||
*
|
||||
* @return true if the router will accept participation, else false.
|
||||
*/
|
||||
boolean joinTunnel(TunnelInfo info);
|
||||
/**
|
||||
* Retrieve the information related to a particular tunnel
|
||||
*
|
||||
* @param id the tunnelId as seen at the gateway
|
||||
*
|
||||
*/
|
||||
TunnelInfo getTunnelInfo(TunnelId id);
|
||||
/**
|
||||
* Retrieve a set of tunnels from the existing ones for various purposes
|
||||
*/
|
||||
List selectOutboundTunnelIds(TunnelSelectionCriteria criteria);
|
||||
/**
|
||||
* Retrieve a set of tunnels from the existing ones for various purposes
|
||||
*/
|
||||
List selectInboundTunnelIds(TunnelSelectionCriteria criteria);
|
||||
/** pick an inbound tunnel not bound to a particular destination */
|
||||
TunnelInfo selectInboundTunnel();
|
||||
/** pick an inbound tunnel bound to the given destination */
|
||||
TunnelInfo selectInboundTunnel(Hash destination);
|
||||
/** pick an outbound tunnel not bound to a particular destination */
|
||||
TunnelInfo selectOutboundTunnel();
|
||||
/** pick an outbound tunnel bound to the given destination */
|
||||
TunnelInfo selectOutboundTunnel(Hash destination);
|
||||
|
||||
/**
|
||||
* Make sure appropriate outbound tunnels are in place, builds requested
|
||||
* inbound tunnels, then fire off a job to ask the ClientManagerFacade to
|
||||
* validate the leaseSet, then publish it in the network database.
|
||||
*
|
||||
*/
|
||||
void createTunnels(Destination destination, ClientTunnelSettings clientSettings, long timeoutMs);
|
||||
|
||||
/**
|
||||
* Called when a peer becomes unreachable - go through all of the current
|
||||
* tunnels and rebuild them if we can, or drop them if we can't.
|
||||
*
|
||||
*/
|
||||
void peerFailed(Hash peer);
|
||||
|
||||
/**
|
||||
* True if the peer currently part of a tunnel
|
||||
*
|
||||
@ -70,4 +50,21 @@ public interface TunnelManagerFacade extends Service {
|
||||
|
||||
/** When does the last tunnel we are participating in expire? */
|
||||
public long getLastParticipatingExpiration();
|
||||
|
||||
/**
|
||||
* the client connected (or updated their settings), so make sure we have
|
||||
* the tunnels for them, and whenever necessary, ask them to authorize
|
||||
* leases.
|
||||
*
|
||||
*/
|
||||
public void buildTunnels(Destination client, ClientTunnelSettings settings);
|
||||
|
||||
public TunnelPoolSettings getInboundSettings();
|
||||
public TunnelPoolSettings getOutboundSettings();
|
||||
public TunnelPoolSettings getInboundSettings(Hash client);
|
||||
public TunnelPoolSettings getOutboundSettings(Hash client);
|
||||
public void setInboundSettings(TunnelPoolSettings settings);
|
||||
public void setOutboundSettings(TunnelPoolSettings settings);
|
||||
public void setInboundSettings(Hash client, TunnelPoolSettings settings);
|
||||
public void setOutboundSettings(Hash client, TunnelPoolSettings settings);
|
||||
}
|
||||
|
@ -343,7 +343,9 @@ public class ClientConnectionRunner {
|
||||
*/
|
||||
void requestLeaseSet(LeaseSet set, long expirationTime, Job onCreateJob, Job onFailedJob) {
|
||||
if (_dead) return;
|
||||
_context.jobQueue().addJob(new RequestLeaseSetJob(_context, this, set, expirationTime, onCreateJob, onFailedJob));
|
||||
if ( (_currentLeaseSet != null) && (_currentLeaseSet.equals(set)) )
|
||||
return; // no change
|
||||
_context.jobQueue().addJob(new RequestLeaseSetJob(_context, this, set, _context.clock().now() + expirationTime, onCreateJob, onFailedJob));
|
||||
}
|
||||
|
||||
void disconnected() {
|
||||
|
@ -222,7 +222,15 @@ public class ClientManager {
|
||||
runner.requestLeaseSet(set, _ctx.clock().now() + timeout, onCreateJob, onFailedJob);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private static final int REQUEST_LEASESET_TIMEOUT = 20*1000;
|
||||
public void requestLeaseSet(Hash dest, LeaseSet ls) {
|
||||
ClientConnectionRunner runner = getRunner(dest);
|
||||
if (runner != null) {
|
||||
// no need to fire off any jobs...
|
||||
runner.requestLeaseSet(ls, REQUEST_LEASESET_TIMEOUT, null, null);
|
||||
}
|
||||
}
|
||||
|
||||
public boolean isLocal(Destination dest) {
|
||||
boolean rv = false;
|
||||
@ -261,6 +269,15 @@ public class ClientManager {
|
||||
return false;
|
||||
}
|
||||
|
||||
public Set listClients() {
|
||||
Set rv = new HashSet();
|
||||
synchronized (_runners) {
|
||||
rv.addAll(_runners.keySet());
|
||||
}
|
||||
return rv;
|
||||
}
|
||||
|
||||
|
||||
ClientConnectionRunner getRunner(Destination dest) {
|
||||
ClientConnectionRunner rv = null;
|
||||
long beforeLock = _ctx.clock().now();
|
||||
|
@ -10,7 +10,9 @@ package net.i2p.router.client;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Destination;
|
||||
@ -112,6 +114,12 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
_log.error("Null manager on requestLeaseSet!");
|
||||
}
|
||||
|
||||
public void requestLeaseSet(Hash dest, LeaseSet set) {
|
||||
if (_manager != null)
|
||||
_manager.requestLeaseSet(dest, set);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Instruct the client (or all clients) that they are under attack. This call
|
||||
* does not block.
|
||||
@ -186,4 +194,16 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
if (_manager != null)
|
||||
_manager.renderStatusHTML(out);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the list of locally connected clients
|
||||
*
|
||||
* @return set of Destination objects
|
||||
*/
|
||||
public Set listClients() {
|
||||
if (_manager != null)
|
||||
return _manager.listClients();
|
||||
else
|
||||
return Collections.EMPTY_SET;
|
||||
}
|
||||
}
|
||||
|
@ -78,7 +78,8 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
|
||||
handleDestroySession(reader, (DestroySessionMessage)message);
|
||||
break;
|
||||
default:
|
||||
_log.warn("Unhandled I2CP type received: " + message.getType());
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Unhandled I2CP type received: " + message.getType());
|
||||
}
|
||||
}
|
||||
|
||||
@ -88,7 +89,8 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
|
||||
*/
|
||||
public void readError(I2CPMessageReader reader, Exception error) {
|
||||
if (_runner.isDead()) return;
|
||||
_log.error("Error occurred", error);
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Error occurred", error);
|
||||
_runner.stopRunning();
|
||||
}
|
||||
|
||||
@ -101,7 +103,8 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
|
||||
try {
|
||||
_runner.doSend(new SetDateMessage());
|
||||
} catch (I2CPMessageException ime) {
|
||||
_log.error("Error writing out the setDate message", ime);
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Error writing out the setDate message", ime);
|
||||
}
|
||||
}
|
||||
private void handleSetDate(I2CPMessageReader reader, SetDateMessage message) {
|
||||
@ -118,7 +121,8 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Signature verified correctly on create session message");
|
||||
} else {
|
||||
_log.error("Signature verification *FAILED* on a create session message. Hijack attempt?");
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Signature verification *FAILED* on a create session message. Hijack attempt?");
|
||||
_runner.disconnectClient("Invalid signature on CreateSessionMessage");
|
||||
return;
|
||||
}
|
||||
@ -152,12 +156,13 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
|
||||
*
|
||||
*/
|
||||
private void handleSendMessage(I2CPMessageReader reader, SendMessageMessage message) {
|
||||
_log.debug("handleSendMessage called");
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("handleSendMessage called");
|
||||
long beforeDistribute = _context.clock().now();
|
||||
MessageId id = _runner.distributeMessage(message);
|
||||
long timeToDistribute = _context.clock().now() - beforeDistribute;
|
||||
_runner.ackSendMessage(id, message.getNonce());
|
||||
if (timeToDistribute > 50)
|
||||
if ( (timeToDistribute > 50) && (_log.shouldLog(Log.WARN)) )
|
||||
_log.warn("Took too long to distribute the message (which holds up the ack): " + timeToDistribute);
|
||||
}
|
||||
|
||||
@ -168,14 +173,16 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
|
||||
*/
|
||||
private void handleReceiveBegin(I2CPMessageReader reader, ReceiveMessageBeginMessage message) {
|
||||
if (_runner.isDead()) return;
|
||||
_log.debug("Handling recieve begin: id = " + message.getMessageId());
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Handling recieve begin: id = " + message.getMessageId());
|
||||
MessagePayloadMessage msg = new MessagePayloadMessage();
|
||||
msg.setMessageId(message.getMessageId());
|
||||
msg.setSessionId(_runner.getSessionId());
|
||||
Payload payload = _runner.getPayload(message.getMessageId());
|
||||
if (payload == null) {
|
||||
_log.error("Payload for message id [" + message.getMessageId()
|
||||
+ "] is null! Unknown message id?");
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Payload for message id [" + message.getMessageId()
|
||||
+ "] is null! Unknown message id?");
|
||||
return;
|
||||
}
|
||||
msg.setPayload(payload);
|
||||
@ -197,17 +204,21 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
|
||||
}
|
||||
|
||||
private void handleDestroySession(I2CPMessageReader reader, DestroySessionMessage message) {
|
||||
_log.info("Destroying client session " + _runner.getSessionId());
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Destroying client session " + _runner.getSessionId());
|
||||
_runner.stopRunning();
|
||||
}
|
||||
|
||||
private void handleCreateLeaseSet(I2CPMessageReader reader, CreateLeaseSetMessage message) {
|
||||
if ( (message.getLeaseSet() == null) || (message.getPrivateKey() == null) || (message.getSigningPrivateKey() == null) ) {
|
||||
_log.error("Null lease set granted: " + message);
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Null lease set granted: " + message);
|
||||
return;
|
||||
}
|
||||
|
||||
_log.info("New lease set granted for destination " + message.getLeaseSet().getDestination().calculateHash().toBase64());
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("New lease set granted for destination "
|
||||
+ message.getLeaseSet().getDestination().calculateHash().toBase64());
|
||||
_context.keyManager().registerKeys(message.getLeaseSet().getDestination(), message.getSigningPrivateKey(), message.getPrivateKey());
|
||||
_context.netDb().publish(message.getLeaseSet());
|
||||
|
||||
|
@ -27,8 +27,6 @@ class CreateSessionJob extends JobImpl {
|
||||
private Log _log;
|
||||
private ClientConnectionRunner _runner;
|
||||
|
||||
private final static long LEASE_CREATION_TIMEOUT = 30*1000;
|
||||
|
||||
public CreateSessionJob(RouterContext context, ClientConnectionRunner runner) {
|
||||
super(context);
|
||||
_log = context.logManager().getLog(CreateSessionJob.class);
|
||||
@ -65,6 +63,6 @@ class CreateSessionJob extends JobImpl {
|
||||
|
||||
// and load 'em up (using anything not yet set as the software defaults)
|
||||
settings.readFromProperties(props);
|
||||
getContext().tunnelManager().createTunnels(_runner.getConfig().getDestination(), settings, LEASE_CREATION_TIMEOUT);
|
||||
getContext().tunnelManager().buildTunnels(_runner.getConfig().getDestination(), settings);
|
||||
}
|
||||
}
|
||||
|
@ -52,7 +52,9 @@ class MessageReceivedJob extends JobImpl {
|
||||
*
|
||||
*/
|
||||
public void messageAvailable(MessageId id, long size) {
|
||||
_log.debug("Sending message available: " + id + " to sessionId " + _runner.getSessionId() + " (with nonce=1)", new Exception("available"));
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Sending message available: " + id + " to sessionId " + _runner.getSessionId()
|
||||
+ " (with nonce=1)", new Exception("available"));
|
||||
MessageStatusMessage msg = new MessageStatusMessage();
|
||||
msg.setMessageId(id);
|
||||
msg.setSessionId(_runner.getSessionId());
|
||||
@ -62,7 +64,8 @@ class MessageReceivedJob extends JobImpl {
|
||||
try {
|
||||
_runner.doSend(msg);
|
||||
} catch (I2CPMessageException ime) {
|
||||
_log.error("Error writing out the message status message", ime);
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Error writing out the message status message", ime);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -49,7 +49,8 @@ class RequestLeaseSetJob extends JobImpl {
|
||||
LeaseRequestState oldReq = _runner.getLeaseRequest();
|
||||
if (oldReq != null) {
|
||||
if (oldReq.getExpiration() > getContext().clock().now()) {
|
||||
_log.error("Old *current* leaseRequest already exists! Why are we trying to request too quickly?", getAddedBy());
|
||||
_log.info("request of a leaseSet is still active, wait a little bit before asking again");
|
||||
requeue(5*1000);
|
||||
return;
|
||||
} else {
|
||||
_log.error("Old *expired* leaseRequest exists! Why did the old request not get killed? (expiration = " + new Date(oldReq.getExpiration()) + ")", getAddedBy());
|
||||
@ -70,7 +71,7 @@ class RequestLeaseSetJob extends JobImpl {
|
||||
msg.setSessionId(_runner.getSessionId());
|
||||
|
||||
for (int i = 0; i < state.getRequested().getLeaseCount(); i++) {
|
||||
msg.addEndpoint(state.getRequested().getLease(i).getRouterIdentity(), state.getRequested().getLease(i).getTunnelId());
|
||||
msg.addEndpoint(state.getRequested().getLease(i).getGateway(), state.getRequested().getLease(i).getTunnelId());
|
||||
}
|
||||
|
||||
try {
|
||||
|
@ -141,7 +141,7 @@ public class BuildTestMessageJob extends JobImpl {
|
||||
ackInstructions.setEncrypted(false);
|
||||
|
||||
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
|
||||
msg.setArrival(new Date(getContext().clock().now()));
|
||||
msg.setArrival(getContext().clock().now());
|
||||
msg.setMessageId(_testMessageKey);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Delivery status message key: " + _testMessageKey + " arrival: " + msg.getArrival());
|
||||
|
@ -107,8 +107,7 @@ public class GarlicMessageBuilder {
|
||||
|
||||
byte encData[] = ctx.elGamalAESEngine().encrypt(cloveSet, target, encryptKey, wrappedTags, encryptTag, 128);
|
||||
msg.setData(encData);
|
||||
Date exp = new Date(config.getExpiration());
|
||||
msg.setMessageExpiration(exp);
|
||||
msg.setMessageExpiration(config.getExpiration());
|
||||
|
||||
if (log.shouldLog(Log.WARN))
|
||||
log.warn("CloveSet size for message " + msg.getUniqueId() + " is " + cloveSet.length
|
||||
@ -133,33 +132,42 @@ public class GarlicMessageBuilder {
|
||||
*
|
||||
*/
|
||||
private static byte[] buildCloveSet(RouterContext ctx, GarlicConfig config) {
|
||||
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
|
||||
ByteArrayOutputStream baos = null;
|
||||
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
|
||||
try {
|
||||
if (config instanceof PayloadGarlicConfig) {
|
||||
byte clove[] = buildClove(ctx, (PayloadGarlicConfig)config);
|
||||
baos = new ByteArrayOutputStream(clove.length + 16);
|
||||
DataHelper.writeLong(baos, 1, 1);
|
||||
baos.write(buildClove(ctx, (PayloadGarlicConfig)config));
|
||||
baos.write(clove);
|
||||
} else {
|
||||
DataHelper.writeLong(baos, 1, config.getCloveCount());
|
||||
byte cloves[][] = new byte[config.getCloveCount()][];
|
||||
for (int i = 0; i < config.getCloveCount(); i++) {
|
||||
GarlicConfig c = config.getClove(i);
|
||||
byte clove[] = null;
|
||||
if (c instanceof PayloadGarlicConfig) {
|
||||
log.debug("Subclove IS a payload garlic clove");
|
||||
clove = buildClove(ctx, (PayloadGarlicConfig)c);
|
||||
cloves[i] = buildClove(ctx, (PayloadGarlicConfig)c);
|
||||
} else {
|
||||
log.debug("Subclove IS NOT a payload garlic clove");
|
||||
clove = buildClove(ctx, c);
|
||||
cloves[i] = buildClove(ctx, c);
|
||||
}
|
||||
if (clove == null)
|
||||
if (cloves[i] == null)
|
||||
throw new DataFormatException("Unable to build clove");
|
||||
else
|
||||
baos.write(clove);
|
||||
}
|
||||
|
||||
int len = 1;
|
||||
for (int i = 0; i < cloves.length; i++)
|
||||
len += cloves[i].length;
|
||||
baos = new ByteArrayOutputStream(len + 16);
|
||||
DataHelper.writeLong(baos, 1, cloves.length);
|
||||
for (int i = 0; i < cloves.length; i++)
|
||||
baos.write(cloves[i]);
|
||||
}
|
||||
if (baos == null)
|
||||
new ByteArrayOutputStream(16);
|
||||
config.getCertificate().writeBytes(baos);
|
||||
DataHelper.writeLong(baos, 4, config.getId());
|
||||
DataHelper.writeDate(baos, new Date(config.getExpiration()));
|
||||
DataHelper.writeLong(baos, DataHelper.DATE_LENGTH, config.getExpiration());
|
||||
} catch (IOException ioe) {
|
||||
log.error("Error building the clove set", ioe);
|
||||
} catch (DataFormatException dfe) {
|
||||
@ -189,7 +197,8 @@ public class GarlicMessageBuilder {
|
||||
clove.setCloveId(config.getId());
|
||||
clove.setExpiration(new Date(config.getExpiration()));
|
||||
clove.setInstructions(config.getDeliveryInstructions());
|
||||
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
|
||||
int size = clove.estimateSize();
|
||||
ByteArrayOutputStream baos = new ByteArrayOutputStream(size);
|
||||
clove.writeBytes(baos);
|
||||
return baos.toByteArray();
|
||||
}
|
||||
|
@ -18,8 +18,11 @@ import java.util.Set;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.RouterIdentity;
|
||||
import net.i2p.data.i2np.DeliveryInstructions;
|
||||
import net.i2p.data.i2np.GarlicClove;
|
||||
import net.i2p.data.i2np.GarlicMessage;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.data.i2np.TunnelGatewayMessage;
|
||||
import net.i2p.router.JobImpl;
|
||||
import net.i2p.router.LeaseSetKeys;
|
||||
import net.i2p.router.Router;
|
||||
@ -33,13 +36,13 @@ import net.i2p.util.Log;
|
||||
* need to be. soon)
|
||||
*
|
||||
*/
|
||||
public class HandleGarlicMessageJob extends JobImpl {
|
||||
public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageReceiver.CloveReceiver {
|
||||
private Log _log;
|
||||
private GarlicMessage _message;
|
||||
private RouterIdentity _from;
|
||||
private Hash _fromHash;
|
||||
private Map _cloves; // map of clove Id --> Expiration of cloves we've already seen
|
||||
private MessageHandler _handler;
|
||||
//private MessageHandler _handler;
|
||||
private GarlicMessageParser _parser;
|
||||
|
||||
private final static int FORWARD_PRIORITY = 50;
|
||||
@ -54,126 +57,56 @@ public class HandleGarlicMessageJob extends JobImpl {
|
||||
_from = from;
|
||||
_fromHash = fromHash;
|
||||
_cloves = new HashMap();
|
||||
_handler = new MessageHandler(context);
|
||||
//_handler = new MessageHandler(context);
|
||||
_parser = new GarlicMessageParser(context);
|
||||
}
|
||||
|
||||
public String getName() { return "Handle Inbound Garlic Message"; }
|
||||
public void runJob() {
|
||||
CloveSet set = _parser.getGarlicCloves(_message, getContext().keyManager().getPrivateKey());
|
||||
if (set == null) {
|
||||
Set keys = getContext().keyManager().getAllKeys();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Decryption with the router's key failed, now try with the " + keys.size() + " leaseSet keys");
|
||||
// our router key failed, which means that it was either encrypted wrong
|
||||
// or it was encrypted to a LeaseSet's PublicKey
|
||||
for (Iterator iter = keys.iterator(); iter.hasNext();) {
|
||||
LeaseSetKeys lskeys = (LeaseSetKeys)iter.next();
|
||||
set = _parser.getGarlicCloves(_message, lskeys.getDecryptionKey());
|
||||
if (set != null) {
|
||||
GarlicMessageReceiver recv = new GarlicMessageReceiver(getContext(), this);
|
||||
recv.receive(_message);
|
||||
}
|
||||
|
||||
public void handleClove(DeliveryInstructions instructions, I2NPMessage data) {
|
||||
switch (instructions.getDeliveryMode()) {
|
||||
case DeliveryInstructions.DELIVERY_MODE_LOCAL:
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("local delivery instructions for clove: " + data);
|
||||
getContext().inNetMessagePool().add(data, null, null);
|
||||
return;
|
||||
case DeliveryInstructions.DELIVERY_MODE_DESTINATION:
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("this message didn't come down a tunnel, not forwarding to a destination: "
|
||||
+ instructions + " - " + data);
|
||||
return;
|
||||
case DeliveryInstructions.DELIVERY_MODE_ROUTER:
|
||||
if (getContext().routerHash().equals(instructions.getRouter())) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Decrypted garlic message with lease set key for destination "
|
||||
+ lskeys.getDestination().calculateHash().toBase64() + " SUCCEEDED: " + set);
|
||||
break;
|
||||
_log.debug("router delivery instructions targetting us");
|
||||
getContext().inNetMessagePool().add(data, null, null);
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Decrypting garlic message with lease set key for destination "
|
||||
+ lskeys.getDestination().calculateHash().toBase64() + " failed");
|
||||
_log.debug("router delivery instructions targetting "
|
||||
+ instructions.getRouter().toBase64().substring(0,4));
|
||||
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), data,
|
||||
instructions.getRouter(),
|
||||
10*1000, 100);
|
||||
getContext().jobQueue().addJob(j);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Decrypted clove set found " + set.getCloveCount() + " cloves: " + set);
|
||||
return;
|
||||
case DeliveryInstructions.DELIVERY_MODE_TUNNEL:
|
||||
TunnelGatewayMessage gw = new TunnelGatewayMessage(getContext());
|
||||
gw.setMessage(data);
|
||||
gw.setTunnelId(instructions.getTunnelId());
|
||||
gw.setMessageExpiration(data.getMessageExpiration());
|
||||
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), gw,
|
||||
instructions.getRouter(),
|
||||
10*1000, 100));
|
||||
return;
|
||||
default:
|
||||
_log.error("Unknown instruction " + instructions.getDeliveryMode() + ": " + instructions);
|
||||
return;
|
||||
}
|
||||
if (set != null) {
|
||||
for (int i = 0; i < set.getCloveCount(); i++) {
|
||||
GarlicClove clove = set.getClove(i);
|
||||
handleClove(clove);
|
||||
}
|
||||
} else {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("CloveMessageParser failed to decrypt the message [" + _message.getUniqueId()
|
||||
+ "] to us when received from [" + _fromHash + "] / [" + _from + "]",
|
||||
new Exception("Decrypt garlic failed"));
|
||||
getContext().statManager().addRateData("crypto.garlic.decryptFail", 1, 0);
|
||||
getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
|
||||
_message.getClass().getName(),
|
||||
"Garlic could not be decrypted");
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isKnown(long cloveId) {
|
||||
boolean known = false;
|
||||
synchronized (_cloves) {
|
||||
known = _cloves.containsKey(new Long(cloveId));
|
||||
}
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("isKnown("+cloveId+"): " + known);
|
||||
return known;
|
||||
}
|
||||
|
||||
private void cleanupCloves() {
|
||||
// this should be in its own thread perhaps? and maybe _cloves should be
|
||||
// synced to disk?
|
||||
List toRemove = new ArrayList(32);
|
||||
long now = getContext().clock().now();
|
||||
synchronized (_cloves) {
|
||||
for (Iterator iter = _cloves.keySet().iterator(); iter.hasNext();) {
|
||||
Long id = (Long)iter.next();
|
||||
Date exp = (Date)_cloves.get(id);
|
||||
if (exp == null) continue; // wtf, not sure how this can happen yet, but i've seen it. grr.
|
||||
if (now > exp.getTime())
|
||||
toRemove.add(id);
|
||||
}
|
||||
for (int i = 0; i < toRemove.size(); i++)
|
||||
_cloves.remove(toRemove.get(i));
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isValid(GarlicClove clove) {
|
||||
if (isKnown(clove.getCloveId())) {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("Duplicate garlic clove received - replay attack in progress? [cloveId = "
|
||||
+ clove.getCloveId() + " expiration = " + clove.getExpiration());
|
||||
return false;
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Clove " + clove.getCloveId() + " expiring on " + clove.getExpiration()
|
||||
+ " is not known");
|
||||
}
|
||||
long now = getContext().clock().now();
|
||||
if (clove.getExpiration().getTime() < now) {
|
||||
if (clove.getExpiration().getTime() < now + Router.CLOCK_FUDGE_FACTOR) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Expired garlic received, but within our fudge factor ["
|
||||
+ clove.getExpiration() + "]");
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Expired garlic clove received - replay attack in progress? [cloveId = "
|
||||
+ clove.getCloveId() + " expiration = " + clove.getExpiration()
|
||||
+ " now = " + (new Date(getContext().clock().now())));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
synchronized (_cloves) {
|
||||
_cloves.put(new Long(clove.getCloveId()), clove.getExpiration());
|
||||
}
|
||||
cleanupCloves();
|
||||
return true;
|
||||
}
|
||||
|
||||
private void handleClove(GarlicClove clove) {
|
||||
if (!isValid(clove)) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.warn("Invalid clove " + clove);
|
||||
return;
|
||||
}
|
||||
long sendExpiration = clove.getExpiration().getTime();
|
||||
// if the clove targets something remote, tunnel route it
|
||||
boolean sendDirect = false;
|
||||
_handler.handleMessage(clove.getInstructions(), clove.getData(),
|
||||
clove.getCloveId(), _from, _fromHash,
|
||||
sendExpiration, FORWARD_PRIORITY, sendDirect);
|
||||
}
|
||||
|
||||
public void dropped() {
|
||||
|
@ -52,38 +52,45 @@ class OutboundClientMessageJobHelper {
|
||||
*
|
||||
* @param bundledReplyLeaseSet if specified, the given LeaseSet will be packaged with the message (allowing
|
||||
* much faster replies, since their netDb search will return almost instantly)
|
||||
* @return garlic, or null if no tunnels were found (or other errors)
|
||||
*/
|
||||
static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK,
|
||||
Payload data, Destination dest, SessionKey wrappedKey, Set wrappedTags,
|
||||
Payload data, Hash from, Destination dest, SessionKey wrappedKey, Set wrappedTags,
|
||||
boolean requireAck, LeaseSet bundledReplyLeaseSet) {
|
||||
PayloadGarlicConfig dataClove = buildDataClove(ctx, data, dest, expiration);
|
||||
return createGarlicMessage(ctx, replyToken, expiration, recipientPK, dataClove, dest, wrappedKey,
|
||||
return createGarlicMessage(ctx, replyToken, expiration, recipientPK, dataClove, from, dest, wrappedKey,
|
||||
wrappedTags, requireAck, bundledReplyLeaseSet);
|
||||
}
|
||||
/**
|
||||
* Allow the app to specify the data clove directly, which enables OutboundClientMessage to resend the
|
||||
* same payload (including expiration and unique id) in different garlics (down different tunnels)
|
||||
*
|
||||
* @return garlic, or null if no tunnels were found (or other errors)
|
||||
*/
|
||||
static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK,
|
||||
PayloadGarlicConfig dataClove, Destination dest, SessionKey wrappedKey,
|
||||
PayloadGarlicConfig dataClove, Hash from, Destination dest, SessionKey wrappedKey,
|
||||
Set wrappedTags, boolean requireAck, LeaseSet bundledReplyLeaseSet) {
|
||||
GarlicConfig config = createGarlicConfig(ctx, replyToken, expiration, recipientPK, dataClove, dest, requireAck, bundledReplyLeaseSet);
|
||||
GarlicConfig config = createGarlicConfig(ctx, replyToken, expiration, recipientPK, dataClove, from, dest, requireAck, bundledReplyLeaseSet);
|
||||
if (config == null)
|
||||
return null;
|
||||
GarlicMessage msg = GarlicMessageBuilder.buildMessage(ctx, config, wrappedKey, wrappedTags);
|
||||
return msg;
|
||||
}
|
||||
|
||||
private static GarlicConfig createGarlicConfig(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK,
|
||||
PayloadGarlicConfig dataClove, Destination dest, boolean requireAck,
|
||||
PayloadGarlicConfig dataClove, Hash from, Destination dest, boolean requireAck,
|
||||
LeaseSet bundledReplyLeaseSet) {
|
||||
Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class);
|
||||
log.debug("Reply token: " + replyToken);
|
||||
if (log.shouldLog(Log.DEBUG))
|
||||
log.debug("Reply token: " + replyToken);
|
||||
GarlicConfig config = new GarlicConfig();
|
||||
|
||||
config.addClove(dataClove);
|
||||
|
||||
if (requireAck) {
|
||||
PayloadGarlicConfig ackClove = buildAckClove(ctx, replyToken, expiration);
|
||||
PayloadGarlicConfig ackClove = buildAckClove(ctx, from, replyToken, expiration);
|
||||
if (ackClove == null)
|
||||
return null; // no tunnels
|
||||
config.addClove(ackClove);
|
||||
}
|
||||
|
||||
@ -108,7 +115,9 @@ class OutboundClientMessageJobHelper {
|
||||
config.setRecipientPublicKey(recipientPK);
|
||||
config.setRequestAck(false);
|
||||
|
||||
log.info("Creating garlic config to be encrypted to " + recipientPK + " for destination " + dest.calculateHash().toBase64());
|
||||
if (log.shouldLog(Log.INFO))
|
||||
log.info("Creating garlic config to be encrypted to " + recipientPK
|
||||
+ " for destination " + dest.calculateHash().toBase64());
|
||||
|
||||
return config;
|
||||
}
|
||||
@ -116,28 +125,25 @@ class OutboundClientMessageJobHelper {
|
||||
/**
|
||||
* Build a clove that sends a DeliveryStatusMessage to us
|
||||
*/
|
||||
private static PayloadGarlicConfig buildAckClove(RouterContext ctx, long replyToken, long expiration) {
|
||||
private static PayloadGarlicConfig buildAckClove(RouterContext ctx, Hash from, long replyToken, long expiration) {
|
||||
Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class);
|
||||
PayloadGarlicConfig ackClove = new PayloadGarlicConfig();
|
||||
|
||||
Hash replyToTunnelRouter = null; // inbound tunnel gateway
|
||||
TunnelId replyToTunnelId = null; // tunnel id on that gateway
|
||||
|
||||
TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
|
||||
criteria.setMaximumTunnelsRequired(1);
|
||||
criteria.setMinimumTunnelsRequired(1);
|
||||
criteria.setReliabilityPriority(50); // arbitrary. fixme
|
||||
criteria.setAnonymityPriority(50); // arbitrary. fixme
|
||||
criteria.setLatencyPriority(50); // arbitrary. fixme
|
||||
List tunnelIds = ctx.tunnelManager().selectInboundTunnelIds(criteria);
|
||||
if (tunnelIds.size() <= 0) {
|
||||
log.error("No inbound tunnels to receive an ack through!?");
|
||||
TunnelInfo replyToTunnel = ctx.tunnelManager().selectInboundTunnel(from);
|
||||
if (replyToTunnel == null) {
|
||||
if (log.shouldLog(Log.ERROR))
|
||||
log.error("Unable to send client message from " + from.toBase64()
|
||||
+ ", as there are no inbound tunnels available");
|
||||
return null;
|
||||
}
|
||||
replyToTunnelId = (TunnelId)tunnelIds.get(0);
|
||||
TunnelInfo info = ctx.tunnelManager().getTunnelInfo(replyToTunnelId);
|
||||
replyToTunnelRouter = info.getThisHop(); // info is the chain, and the first hop is the gateway
|
||||
log.debug("Ack for the data message will come back along tunnel " + replyToTunnelId + ":\n" + info);
|
||||
replyToTunnelId = replyToTunnel.getReceiveTunnelId(0);
|
||||
replyToTunnelRouter = replyToTunnel.getPeer(0);
|
||||
if (log.shouldLog(Log.DEBUG))
|
||||
log.debug("Ack for the data message will come back along tunnel " + replyToTunnelId
|
||||
+ ":\n" + replyToTunnel);
|
||||
|
||||
DeliveryInstructions ackInstructions = new DeliveryInstructions();
|
||||
ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL);
|
||||
@ -148,9 +154,10 @@ class OutboundClientMessageJobHelper {
|
||||
ackInstructions.setEncrypted(false);
|
||||
|
||||
DeliveryStatusMessage msg = new DeliveryStatusMessage(ctx);
|
||||
msg.setArrival(new Date(ctx.clock().now()));
|
||||
msg.setArrival(ctx.clock().now());
|
||||
msg.setMessageId(replyToken);
|
||||
log.debug("Delivery status message key: " + replyToken + " arrival: " + msg.getArrival());
|
||||
if (log.shouldLog(Log.DEBUG))
|
||||
log.debug("Delivery status message key: " + replyToken + " arrival: " + msg.getArrival());
|
||||
|
||||
ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
|
||||
ackClove.setDeliveryInstructions(ackInstructions);
|
||||
@ -160,7 +167,11 @@ class OutboundClientMessageJobHelper {
|
||||
ackClove.setRecipient(ctx.router().getRouterInfo());
|
||||
ackClove.setRequestAck(false);
|
||||
|
||||
log.debug("Delivery status message is targetting us [" + ackClove.getRecipient().getIdentity().getHash().toBase64() + "] via tunnel " + replyToTunnelId.getTunnelId() + " on " + replyToTunnelRouter.toBase64());
|
||||
if (log.shouldLog(Log.DEBUG))
|
||||
log.debug("Delivery status message is targetting us ["
|
||||
+ ackClove.getRecipient().getIdentity().getHash().toBase64()
|
||||
+ "] via tunnel " + replyToTunnelId.getTunnelId() + " on "
|
||||
+ replyToTunnelRouter.toBase64());
|
||||
|
||||
return ackClove;
|
||||
}
|
||||
@ -211,7 +222,7 @@ class OutboundClientMessageJobHelper {
|
||||
clove.setId(ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE));
|
||||
DatabaseStoreMessage msg = new DatabaseStoreMessage(ctx);
|
||||
msg.setLeaseSet(replyLeaseSet);
|
||||
msg.setMessageExpiration(new Date(expiration));
|
||||
msg.setMessageExpiration(expiration);
|
||||
msg.setKey(replyLeaseSet.getDestination().calculateHash());
|
||||
clove.setPayload(msg);
|
||||
clove.setRecipientPublicKey(null);
|
||||
|
@ -34,7 +34,7 @@ import net.i2p.router.JobImpl;
|
||||
import net.i2p.router.ReplyJob;
|
||||
import net.i2p.router.Router;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.router.TunnelSelectionCriteria;
|
||||
import net.i2p.router.TunnelInfo;
|
||||
import net.i2p.router.MessageSelector;
|
||||
|
||||
import net.i2p.util.Log;
|
||||
@ -147,9 +147,16 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": Clove built");
|
||||
long timeoutMs = _overallExpiration - getContext().clock().now();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": preparing to search for the leaseSet");
|
||||
Hash key = _to.calculateHash();
|
||||
SendJob success = new SendJob(getContext());
|
||||
LookupLeaseSetFailedJob failed = new LookupLeaseSetFailedJob(getContext());
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": Send outbound client message - sending off leaseSet lookup job");
|
||||
getContext().netDb().lookupLeaseSet(_to.calculateHash(), new SendJob(getContext()), new LookupLeaseSetFailedJob(getContext()), timeoutMs);
|
||||
getContext().netDb().lookupLeaseSet(key, success, failed, timeoutMs);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": after sending off leaseSet lookup job");
|
||||
}
|
||||
|
||||
private boolean getShouldBundle() {
|
||||
@ -281,10 +288,17 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
||||
|
||||
GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(getContext(), token,
|
||||
_overallExpiration, key,
|
||||
_clove,
|
||||
_clove, _from.calculateHash(),
|
||||
_to,
|
||||
sessKey, tags,
|
||||
true, replyLeaseSet);
|
||||
if (msg == null) {
|
||||
// set to null if there are no tunnels to ack the reply back through
|
||||
// (should we always fail for this? or should we send it anyway, even if
|
||||
// we dont receive the reply? hmm...)
|
||||
dieFatal();
|
||||
return;
|
||||
}
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": send() - token expected " + token);
|
||||
@ -296,21 +310,17 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": Placing GarlicMessage into the new tunnel message bound for "
|
||||
+ _lease.getTunnelId() + " on "
|
||||
+ _lease.getRouterIdentity().getHash().toBase64());
|
||||
+ _lease.getGateway().toBase64());
|
||||
|
||||
TunnelId outTunnelId = selectOutboundTunnel();
|
||||
if (outTunnelId != null) {
|
||||
TunnelInfo outTunnel = selectOutboundTunnel();
|
||||
if (outTunnel != null) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": Sending tunnel message out " + outTunnelId + " to "
|
||||
_log.debug(getJobId() + ": Sending tunnel message out " + outTunnel.getSendTunnelId(0) + " to "
|
||||
+ _lease.getTunnelId() + " on "
|
||||
+ _lease.getRouterIdentity().getHash().toBase64());
|
||||
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId,
|
||||
_lease.getRouterIdentity().getHash(),
|
||||
_lease.getTunnelId(), null, onReply,
|
||||
onFail, selector,
|
||||
_overallExpiration-getContext().clock().now(),
|
||||
SEND_PRIORITY);
|
||||
getContext().jobQueue().addJob(j);
|
||||
+ _lease.getGateway().toBase64());
|
||||
|
||||
// dispatch may take 100+ms, so toss it in its own job
|
||||
getContext().jobQueue().addJob(new DispatchJob(getContext(), msg, outTunnel, selector, onReply, onFail, (int)(_overallExpiration-getContext().clock().now())));
|
||||
} else {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error(getJobId() + ": Could not find any outbound tunnels to send the payload through... wtf?");
|
||||
@ -319,21 +329,38 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
||||
_clientMessage = null;
|
||||
_clove = null;
|
||||
}
|
||||
|
||||
private class DispatchJob extends JobImpl {
|
||||
private GarlicMessage _msg;
|
||||
private TunnelInfo _outTunnel;
|
||||
private ReplySelector _selector;
|
||||
private SendSuccessJob _replyFound;
|
||||
private SendTimeoutJob _replyTimeout;
|
||||
private int _timeoutMs;
|
||||
public DispatchJob(RouterContext ctx, GarlicMessage msg, TunnelInfo out, ReplySelector sel, SendSuccessJob success, SendTimeoutJob timeout, int timeoutMs) {
|
||||
super(ctx);
|
||||
_msg = msg;
|
||||
_outTunnel = out;
|
||||
_selector = sel;
|
||||
_replyFound = success;
|
||||
_replyTimeout = timeout;
|
||||
_timeoutMs = timeoutMs;
|
||||
}
|
||||
public String getName() { return "Dispatch outbound client message"; }
|
||||
public void runJob() {
|
||||
getContext().messageRegistry().registerPending(_selector, _replyFound, _replyTimeout, _timeoutMs);
|
||||
getContext().tunnelDispatcher().dispatchOutbound(_msg, _outTunnel.getSendTunnelId(0), _lease.getTunnelId(), _lease.getGateway());
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Pick an arbitrary outbound tunnel to send the message through, or null if
|
||||
* there aren't any around
|
||||
*
|
||||
*/
|
||||
private TunnelId selectOutboundTunnel() {
|
||||
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
|
||||
crit.setMaximumTunnelsRequired(1);
|
||||
crit.setMinimumTunnelsRequired(1);
|
||||
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);
|
||||
if (tunnelIds.size() <= 0)
|
||||
return null;
|
||||
else
|
||||
return (TunnelId)tunnelIds.get(0);
|
||||
private TunnelInfo selectOutboundTunnel() {
|
||||
return getContext().tunnelManager().selectOutboundTunnel(_from.calculateHash());
|
||||
}
|
||||
|
||||
/**
|
||||
@ -405,13 +432,24 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
||||
private long _pendingToken;
|
||||
public ReplySelector(long token) {
|
||||
_pendingToken = token;
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(OutboundClientMessageOneShotJob.this.getJobId()
|
||||
+ "Reply selector for client message: token=" + token);
|
||||
}
|
||||
|
||||
public boolean continueMatching() { return false; }
|
||||
public boolean continueMatching() {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(OutboundClientMessageOneShotJob.this.getJobId()
|
||||
+ "dont continue matching for token=" + _pendingToken);
|
||||
return false;
|
||||
}
|
||||
public long getExpiration() { return _overallExpiration; }
|
||||
|
||||
public boolean isMatch(I2NPMessage inMsg) {
|
||||
if (inMsg.getType() == DeliveryStatusMessage.MESSAGE_TYPE) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(OutboundClientMessageOneShotJob.this.getJobId()
|
||||
+ "delivery status message received: " + inMsg + " our token: " + _pendingToken);
|
||||
return _pendingToken == ((DeliveryStatusMessage)inMsg).getMessageId();
|
||||
} else {
|
||||
return false;
|
||||
@ -439,7 +477,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
||||
_tags = tags;
|
||||
}
|
||||
|
||||
public String getName() { return "Send client message successful to a lease"; }
|
||||
public String getName() { return "Send client message successful"; }
|
||||
public void runJob() {
|
||||
if (_finished) return;
|
||||
_finished = true;
|
||||
@ -477,7 +515,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
||||
super(enclosingContext);
|
||||
}
|
||||
|
||||
public String getName() { return "Send client message timed out through a lease"; }
|
||||
public String getName() { return "Send client message timed out"; }
|
||||
public void runJob() {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(OutboundClientMessageOneShotJob.this.getJobId()
|
||||
|
@ -80,9 +80,11 @@ public class SendGarlicJob extends JobImpl {
|
||||
_message = GarlicMessageBuilder.buildMessage(getContext(), _config, _wrappedKey, _wrappedTags);
|
||||
long after = getContext().clock().now();
|
||||
if ( (after - before) > 1000) {
|
||||
_log.warn("Building the garlic took too long [" + (after-before)+" ms]", getAddedBy());
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Building the garlic took too long [" + (after-before)+" ms]", getAddedBy());
|
||||
} else {
|
||||
_log.debug("Building the garlic was fast! " + (after - before) + " ms");
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Building the garlic was fast! " + (after - before) + " ms");
|
||||
}
|
||||
getContext().jobQueue().addJob(new SendJob(getContext()));
|
||||
}
|
||||
@ -103,7 +105,7 @@ public class SendGarlicJob extends JobImpl {
|
||||
|
||||
private void sendGarlic() {
|
||||
OutNetMessage msg = new OutNetMessage(getContext());
|
||||
long when = _message.getMessageExpiration().getTime(); // + Router.CLOCK_FUDGE_FACTOR;
|
||||
long when = _message.getMessageExpiration(); // + Router.CLOCK_FUDGE_FACTOR;
|
||||
msg.setExpiration(when);
|
||||
msg.setMessage(_message);
|
||||
msg.setOnFailedReplyJob(_onReplyFailed);
|
||||
|
@ -13,7 +13,6 @@ import java.util.Date;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.RouterInfo;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.router.InNetMessage;
|
||||
import net.i2p.router.Job;
|
||||
import net.i2p.router.JobImpl;
|
||||
import net.i2p.router.MessageSelector;
|
||||
@ -141,10 +140,7 @@ public class SendMessageDirectJob extends JobImpl {
|
||||
if (_onSend != null)
|
||||
getContext().jobQueue().addJob(_onSend);
|
||||
|
||||
InNetMessage msg = new InNetMessage(getContext());
|
||||
msg.setFromRouter(_router.getIdentity());
|
||||
msg.setMessage(_message);
|
||||
getContext().inNetMessagePool().add(msg);
|
||||
getContext().inNetMessagePool().add(_message, _router.getIdentity(), null);
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Adding " + _message.getClass().getName()
|
||||
|
@ -25,13 +25,12 @@ import net.i2p.data.i2np.DatabaseLookupMessage;
|
||||
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
|
||||
import net.i2p.data.i2np.DatabaseStoreMessage;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.data.i2np.TunnelMessage;
|
||||
import net.i2p.data.i2np.TunnelGatewayMessage;
|
||||
import net.i2p.router.Job;
|
||||
import net.i2p.router.JobImpl;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.router.TunnelInfo;
|
||||
import net.i2p.router.message.SendMessageDirectJob;
|
||||
import net.i2p.router.message.SendTunnelMessageJob;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
@ -158,17 +157,21 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
|
||||
}
|
||||
|
||||
private void sendThroughTunnel(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) {
|
||||
TunnelInfo info = getContext().tunnelManager().getTunnelInfo(replyTunnel);
|
||||
|
||||
// the sendTunnelMessageJob can't handle injecting into the tunnel anywhere but the beginning
|
||||
// (and if we are the beginning, we have the signing key)
|
||||
if ( (info == null) || (info.getSigningKey() != null)) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Sending reply through " + replyTunnel + " on " + toPeer);
|
||||
getContext().jobQueue().addJob(new SendTunnelMessageJob(getContext(), message, replyTunnel, toPeer, null, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
|
||||
if (getContext().routerHash().equals(toPeer)) {
|
||||
// if we are the gateway, act as if we received it
|
||||
TunnelGatewayMessage m = new TunnelGatewayMessage(getContext());
|
||||
m.setMessage(message);
|
||||
m.setTunnelId(replyTunnel);
|
||||
m.setMessageExpiration(message.getMessageExpiration());
|
||||
getContext().tunnelDispatcher().dispatch(m);
|
||||
} else {
|
||||
// its a tunnel we're participating in, but we're NOT the gateway, so
|
||||
sendToGateway(message, toPeer, replyTunnel, info);
|
||||
// if we aren't the gateway, forward it on
|
||||
TunnelGatewayMessage m = new TunnelGatewayMessage(getContext());
|
||||
m.setMessage(message);
|
||||
m.setMessageExpiration(message.getMessageExpiration());
|
||||
m.setTunnelId(replyTunnel);
|
||||
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), m, toPeer, 10*1000, 100);
|
||||
getContext().jobQueue().addJob(j);
|
||||
}
|
||||
}
|
||||
|
||||
@ -184,14 +187,14 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
|
||||
|
||||
long expiration = REPLY_TIMEOUT + getContext().clock().now();
|
||||
|
||||
TunnelMessage msg = new TunnelMessage(getContext());
|
||||
msg.setData(message.toByteArray());
|
||||
TunnelGatewayMessage msg = new TunnelGatewayMessage(getContext());
|
||||
msg.setMessage(message);
|
||||
msg.setTunnelId(replyTunnel);
|
||||
msg.setMessageExpiration(new Date(expiration));
|
||||
msg.setMessageExpiration(expiration);
|
||||
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, toPeer, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
|
||||
|
||||
String bodyType = message.getClass().getName();
|
||||
getContext().messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
|
||||
getContext().messageHistory().wrap(bodyType, message.getUniqueId(), TunnelGatewayMessage.class.getName(), msg.getUniqueId());
|
||||
}
|
||||
|
||||
public String getName() { return "Handle Database Lookup Message"; }
|
||||
|
@ -18,8 +18,7 @@ import net.i2p.data.i2np.DatabaseStoreMessage;
|
||||
import net.i2p.data.i2np.DeliveryStatusMessage;
|
||||
import net.i2p.router.JobImpl;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.router.TunnelSelectionCriteria;
|
||||
import net.i2p.router.message.SendTunnelMessageJob;
|
||||
import net.i2p.router.TunnelInfo;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
@ -32,7 +31,7 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
|
||||
private RouterIdentity _from;
|
||||
private Hash _fromHash;
|
||||
|
||||
private static final long ACK_TIMEOUT = 15*1000;
|
||||
private static final int ACK_TIMEOUT = 15*1000;
|
||||
private static final int ACK_PRIORITY = 100;
|
||||
|
||||
public HandleDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash) {
|
||||
@ -93,33 +92,19 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
|
||||
private void sendAck() {
|
||||
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
|
||||
msg.setMessageId(_message.getReplyToken());
|
||||
msg.setArrival(new Date(getContext().clock().now()));
|
||||
TunnelId outTunnelId = selectOutboundTunnel();
|
||||
if (outTunnelId == null) {
|
||||
msg.setArrival(getContext().clock().now());
|
||||
TunnelInfo outTunnel = selectOutboundTunnel();
|
||||
if (outTunnel == null) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("No outbound tunnel could be found");
|
||||
return;
|
||||
} else {
|
||||
getContext().jobQueue().addJob(new SendTunnelMessageJob(getContext(), msg, outTunnelId,
|
||||
_message.getReplyGateway(), _message.getReplyTunnel(),
|
||||
null, null, null, null, ACK_TIMEOUT, ACK_PRIORITY));
|
||||
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0), _message.getReplyTunnel(), _message.getReplyGateway());
|
||||
}
|
||||
}
|
||||
|
||||
private TunnelId selectOutboundTunnel() {
|
||||
TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
|
||||
criteria.setAnonymityPriority(80);
|
||||
criteria.setLatencyPriority(50);
|
||||
criteria.setReliabilityPriority(20);
|
||||
criteria.setMaximumTunnelsRequired(1);
|
||||
criteria.setMinimumTunnelsRequired(1);
|
||||
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(criteria);
|
||||
if (tunnelIds.size() <= 0) {
|
||||
_log.error("No outbound tunnels?!");
|
||||
return null;
|
||||
} else {
|
||||
return (TunnelId)tunnelIds.get(0);
|
||||
}
|
||||
private TunnelInfo selectOutboundTunnel() {
|
||||
return getContext().tunnelManager().selectOutboundTunnel();
|
||||
}
|
||||
|
||||
public String getName() { return "Handle Database Store Message"; }
|
||||
|
@ -16,6 +16,7 @@ import net.i2p.data.RouterInfo;
|
||||
import net.i2p.data.SigningPrivateKey;
|
||||
import net.i2p.router.JobImpl;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.router.Router;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
@ -38,6 +39,7 @@ public class PublishLocalRouterInfoJob extends JobImpl {
|
||||
_log.debug("Old routerInfo contains " + ri.getAddresses().size()
|
||||
+ " addresses and " + ri.getOptions().size() + " options");
|
||||
Properties stats = getContext().statPublisher().publishStatistics();
|
||||
stats.setProperty(RouterInfo.PROP_NETWORK_ID, ""+Router.NETWORK_ID);
|
||||
try {
|
||||
ri.setPublished(getContext().clock().now());
|
||||
ri.setOptions(stats);
|
||||
|
@ -72,7 +72,7 @@ class ExploreJob extends SearchJob {
|
||||
msg.setSearchKey(getState().getTarget());
|
||||
msg.setFrom(replyGateway.getIdentity().getHash());
|
||||
msg.setDontIncludePeers(getState().getAttempted());
|
||||
msg.setMessageExpiration(new Date(expiration));
|
||||
msg.setMessageExpiration(expiration);
|
||||
msg.setReplyTunnel(replyTunnelId);
|
||||
|
||||
Set attempted = getState().getAttempted();
|
||||
|
@ -110,7 +110,7 @@ class HarvesterJob extends JobImpl {
|
||||
long now = getContext().clock().now();
|
||||
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
|
||||
msg.setFrom(getContext().routerHash());
|
||||
msg.setMessageExpiration(new Date(10*1000+now));
|
||||
msg.setMessageExpiration(10*1000+now);
|
||||
msg.setSearchKey(peer);
|
||||
msg.setReplyTunnel(null);
|
||||
SendMessageDirectJob job = new SendMessageDirectJob(getContext(), msg, peer, 10*1000, PRIORITY);
|
||||
|
@ -75,9 +75,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
private Set _publishingLeaseSets;
|
||||
|
||||
/**
|
||||
* Hash of the key currently being searched for, pointing at a List of
|
||||
* DeferredSearchJob elements for each additional request waiting for that
|
||||
* search to complete.
|
||||
* Hash of the key currently being searched for, pointing the SearchJob that
|
||||
* is currently operating. Subsequent requests for that same key are simply
|
||||
* added on to the list of jobs fired on success/failure
|
||||
*
|
||||
*/
|
||||
private Map _activeRequests;
|
||||
@ -87,72 +87,14 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
*
|
||||
*/
|
||||
void searchComplete(Hash key) {
|
||||
List deferred = null;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("search Complete: " + key);
|
||||
SearchJob removed = null;
|
||||
synchronized (_activeRequests) {
|
||||
deferred = (List)_activeRequests.remove(key);
|
||||
}
|
||||
if (deferred != null) {
|
||||
for (int i = 0; i < deferred.size(); i++) {
|
||||
DeferredSearchJob j = (DeferredSearchJob)deferred.get(i);
|
||||
_context.jobQueue().addJob(j);
|
||||
}
|
||||
removed = (SearchJob)_activeRequests.remove(key);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* We want to search for a given key, but since there is already a job
|
||||
* out searching for it, we can just sit back and wait for them to finish.
|
||||
* Perhaps we should also queue up a 'wakeup' job, in case that already
|
||||
* active search won't expire/complete until after we time out? Though in
|
||||
* practice, pretty much all of the searches are the same duration...
|
||||
*
|
||||
* Anyway, this job is fired when that already active search completes -
|
||||
* successfully or not - and either fires off the success task (or the fail
|
||||
* task if we have expired), or it runs up its own search.
|
||||
*
|
||||
*/
|
||||
private class DeferredSearchJob extends JobImpl {
|
||||
private Hash _key;
|
||||
private Job _onFind;
|
||||
private Job _onFailed;
|
||||
private long _expiration;
|
||||
private boolean _isLease;
|
||||
|
||||
public DeferredSearchJob(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
|
||||
super(KademliaNetworkDatabaseFacade.this._context);
|
||||
_key = key;
|
||||
_onFind = onFindJob;
|
||||
_onFailed = onFailedLookupJob;
|
||||
_isLease = isLease;
|
||||
_expiration = getContext().clock().now() + timeoutMs;
|
||||
}
|
||||
public String getName() { return "Execute deferred search"; }
|
||||
public void runJob() {
|
||||
long remaining = getContext().clock().now() - _expiration;
|
||||
if (remaining <= 0) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Deferred search for " + _key.toBase64() + " expired prior to sending");
|
||||
if (_onFailed != null)
|
||||
getContext().jobQueue().addJob(_onFailed);
|
||||
} else {
|
||||
// ok, didn't time out - either we have the key or we can search
|
||||
// for it
|
||||
LeaseSet ls = lookupLeaseSetLocally(_key);
|
||||
if (ls == null) {
|
||||
RouterInfo ri = lookupRouterInfoLocally(_key);
|
||||
if (ri == null) {
|
||||
search(_key, _onFind, _onFailed, remaining, _isLease);
|
||||
} else {
|
||||
if (_onFind != null)
|
||||
getContext().jobQueue().addJob(_onFind);
|
||||
}
|
||||
} else {
|
||||
if (_onFind != null)
|
||||
getContext().jobQueue().addJob(_onFind);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
@ -165,6 +107,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
/** don't probe or broadcast data, just respond and search when explicitly needed */
|
||||
private boolean _quiet = false;
|
||||
|
||||
public static final String PROP_ENFORCE_NETID = "router.networkDatabase.enforceNetId";
|
||||
private static final boolean DEFAULT_ENFORCE_NETID = false;
|
||||
private boolean _enforceNetId = DEFAULT_ENFORCE_NETID;
|
||||
|
||||
public final static String PROP_DB_DIR = "router.networkDatabase.dbDir";
|
||||
public final static String DEFAULT_DB_DIR = "netDb";
|
||||
|
||||
@ -185,6 +131,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
_publishingLeaseSets = new HashSet(8);
|
||||
_lastExploreNew = 0;
|
||||
_activeRequests = new HashMap(8);
|
||||
_enforceNetId = DEFAULT_ENFORCE_NETID;
|
||||
}
|
||||
|
||||
KBucketSet getKBuckets() { return _kb; }
|
||||
@ -280,6 +227,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
_log.info("No DB dir specified [" + PROP_DB_DIR + "], using [" + DEFAULT_DB_DIR + "]");
|
||||
_dbDir = DEFAULT_DB_DIR;
|
||||
}
|
||||
String enforce = _context.getProperty(PROP_ENFORCE_NETID);
|
||||
if (enforce != null)
|
||||
_enforceNetId = Boolean.valueOf(enforce).booleanValue();
|
||||
else
|
||||
_enforceNetId = DEFAULT_ENFORCE_NETID;
|
||||
_ds.restart();
|
||||
synchronized (_explicitSendKeys) { _explicitSendKeys.clear(); }
|
||||
synchronized (_exploreKeys) { _exploreKeys.clear(); }
|
||||
@ -301,6 +253,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
_log.info("No DB dir specified [" + PROP_DB_DIR + "], using [" + DEFAULT_DB_DIR + "]");
|
||||
dbDir = DEFAULT_DB_DIR;
|
||||
}
|
||||
String enforce = _context.getProperty(PROP_ENFORCE_NETID);
|
||||
if (enforce != null)
|
||||
_enforceNetId = Boolean.valueOf(enforce).booleanValue();
|
||||
else
|
||||
_enforceNetId = DEFAULT_ENFORCE_NETID;
|
||||
|
||||
_kb = new KBucketSet(_context, ri.getIdentity().getHash());
|
||||
_ds = new PersistentDataStore(_context, dbDir, this);
|
||||
@ -406,11 +363,17 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
if (!_initialized) return;
|
||||
LeaseSet ls = lookupLeaseSetLocally(key);
|
||||
if (ls != null) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("leaseSet found locally, firing " + onFindJob);
|
||||
if (onFindJob != null)
|
||||
_context.jobQueue().addJob(onFindJob);
|
||||
} else {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("leaseSet not found locally, running search");
|
||||
search(key, onFindJob, onFailedLookupJob, timeoutMs, true);
|
||||
}
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("after lookupLeaseSet");
|
||||
}
|
||||
|
||||
public LeaseSet lookupLeaseSetLocally(Hash key) {
|
||||
@ -647,6 +610,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
_log.warn("Peer " + key.toBase64() + " published their routerInfo in the future?! ["
|
||||
+ new Date(routerInfo.getPublished()) + "]", new Exception("Rejecting store"));
|
||||
return "Peer " + key.toBase64() + " published " + DataHelper.formatDuration(age) + " in the future?!";
|
||||
} else if (_enforceNetId && (routerInfo.getNetworkId() != Router.NETWORK_ID) ){
|
||||
String rv = "Peer " + key.toBase64() + " is from another network, not accepting it (id="
|
||||
+ routerInfo.getNetworkId() + ", want " + Router.NETWORK_ID + ")";
|
||||
return rv;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
@ -764,28 +731,28 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
* without any match)
|
||||
*
|
||||
*/
|
||||
private void search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
|
||||
void search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
|
||||
if (!_initialized) return;
|
||||
int pendingRequests = 0;
|
||||
boolean allowSearch = false;
|
||||
boolean isNew = true;
|
||||
SearchJob searchJob = null;
|
||||
synchronized (_activeRequests) {
|
||||
List pending = (List)_activeRequests.get(key);
|
||||
if (pending == null) {
|
||||
_activeRequests.put(key, new ArrayList(0));
|
||||
allowSearch = true;
|
||||
searchJob = (SearchJob)_activeRequests.get(key);
|
||||
if (searchJob == null) {
|
||||
searchJob = new SearchJob(_context, this, key, onFindJob, onFailedLookupJob,
|
||||
timeoutMs, true, isLease);
|
||||
_activeRequests.put(key, searchJob);
|
||||
} else {
|
||||
pending.add(new DeferredSearchJob(key, onFindJob, onFailedLookupJob, timeoutMs, isLease));
|
||||
pendingRequests = pending.size();
|
||||
allowSearch = false;
|
||||
isNew = false;
|
||||
}
|
||||
}
|
||||
if (allowSearch) {
|
||||
_context.jobQueue().addJob(new SearchJob(_context, this, key, onFindJob, onFailedLookupJob,
|
||||
timeoutMs, true, isLease));
|
||||
if (isNew) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("this is the first search for that key, fire off the SearchJob");
|
||||
_context.jobQueue().addJob(searchJob);
|
||||
} else {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Deferring search for " + key.toBase64() + ": there are " + pendingRequests
|
||||
+ " other concurrent requests for it");
|
||||
_log.info("Deferring search for " + key.toBase64() + " with " + onFindJob);
|
||||
searchJob.addDeferred(onFindJob, onFailedLookupJob, timeoutMs, isLease);
|
||||
}
|
||||
}
|
||||
|
||||
@ -839,7 +806,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
buf.append("Earliest expiration date was: <i>").append(DataHelper.formatDuration(0-exp)).append(" ago</i><br />\n");
|
||||
for (int i = 0; i < ls.getLeaseCount(); i++) {
|
||||
buf.append("Lease ").append(i).append(": gateway <i>");
|
||||
buf.append(ls.getLease(i).getRouterIdentity().getHash().toBase64().substring(0,6));
|
||||
buf.append(ls.getLease(i).getGateway().toBase64().substring(0,6));
|
||||
buf.append("</i> tunnelId <i>").append(ls.getLease(i).getTunnelId().getTunnelId()).append("</i><br />\n");
|
||||
}
|
||||
buf.append("<hr />\n");
|
||||
|
@ -43,7 +43,7 @@ public class RepublishLeaseSetJob extends JobImpl {
|
||||
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
|
||||
_log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
|
||||
} else {
|
||||
getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, null, null, REPUBLISH_LEASESET_DELAY));
|
||||
getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_DELAY));
|
||||
}
|
||||
} else {
|
||||
_log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");
|
||||
@ -60,4 +60,21 @@ public class RepublishLeaseSetJob extends JobImpl {
|
||||
throw re;
|
||||
}
|
||||
}
|
||||
|
||||
private class OnSuccess extends JobImpl {
|
||||
public OnSuccess(RouterContext ctx) { super(ctx); }
|
||||
public String getName() { return "Publish leaseSet successful"; }
|
||||
public void runJob() {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("successful publishing of the leaseSet for " + _dest.toBase64());
|
||||
}
|
||||
}
|
||||
private class OnFailure extends JobImpl {
|
||||
public OnFailure(RouterContext ctx) { super(ctx); }
|
||||
public String getName() { return "Publish leaseSet failed"; }
|
||||
public void runJob() {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("FAILED publishing of the leaseSet for " + _dest.toBase64());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -8,11 +8,13 @@ package net.i2p.router.networkdb.kademlia;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.DataStructure;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.RouterInfo;
|
||||
@ -25,7 +27,6 @@ import net.i2p.router.RouterContext;
|
||||
import net.i2p.router.TunnelInfo;
|
||||
import net.i2p.router.TunnelSelectionCriteria;
|
||||
import net.i2p.router.message.SendMessageDirectJob;
|
||||
import net.i2p.router.message.SendTunnelMessageJob;
|
||||
import net.i2p.router.peermanager.PeerProfile;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
@ -46,6 +47,9 @@ class SearchJob extends JobImpl {
|
||||
private boolean _isLease;
|
||||
private Job _pendingRequeueJob;
|
||||
private PeerSelector _peerSelector;
|
||||
private List _deferredSearches;
|
||||
private boolean _deferredCleared;
|
||||
private long _startedOn;
|
||||
|
||||
private static final int SEARCH_BREDTH = 3; // 3 peers at a time
|
||||
private static final int SEARCH_PRIORITY = 400; // large because the search is probably for a real search
|
||||
@ -80,7 +84,10 @@ class SearchJob extends JobImpl {
|
||||
_timeoutMs = timeoutMs;
|
||||
_keepStats = keepStats;
|
||||
_isLease = isLease;
|
||||
_deferredSearches = new ArrayList(0);
|
||||
_deferredCleared = false;
|
||||
_peerSelector = new PeerSelector(getContext());
|
||||
_startedOn = -1;
|
||||
_expiration = getContext().clock().now() + timeoutMs;
|
||||
getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
|
||||
getContext().statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
|
||||
@ -96,12 +103,13 @@ class SearchJob extends JobImpl {
|
||||
}
|
||||
|
||||
public void runJob() {
|
||||
_startedOn = getContext().clock().now();
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(getJobId() + ": Searching for " + _state.getTarget()); // , getAddedBy());
|
||||
getContext().statManager().addRateData("netDb.searchCount", 1, 0);
|
||||
searchNext();
|
||||
}
|
||||
|
||||
|
||||
protected SearchState getState() { return _state; }
|
||||
protected KademliaNetworkDatabaseFacade getFacade() { return _facade; }
|
||||
protected long getExpiration() { return _expiration; }
|
||||
@ -276,15 +284,15 @@ class SearchJob extends JobImpl {
|
||||
*
|
||||
*/
|
||||
protected void sendLeaseSearch(RouterInfo router) {
|
||||
TunnelId inTunnelId = getInboundTunnelId();
|
||||
if (inTunnelId == null) {
|
||||
TunnelInfo inTunnel = getInboundTunnelId();
|
||||
if (inTunnel == null) {
|
||||
_log.error("No tunnels to get search replies through! wtf!");
|
||||
getContext().jobQueue().addJob(new FailedJob(getContext(), router));
|
||||
return;
|
||||
}
|
||||
|
||||
TunnelInfo inTunnel = getContext().tunnelManager().getTunnelInfo(inTunnelId);
|
||||
RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getThisHop());
|
||||
TunnelId inTunnelId = inTunnel.getReceiveTunnelId(0);
|
||||
|
||||
RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
|
||||
if (inGateway == null) {
|
||||
_log.error("We can't find the gateway to our inbound tunnel?! wtf");
|
||||
getContext().jobQueue().addJob(new FailedJob(getContext(), router));
|
||||
@ -295,12 +303,14 @@ class SearchJob extends JobImpl {
|
||||
|
||||
DatabaseLookupMessage msg = buildMessage(inTunnelId, inGateway, expiration);
|
||||
|
||||
TunnelId outTunnelId = getOutboundTunnelId();
|
||||
if (outTunnelId == null) {
|
||||
TunnelInfo outTunnel = getOutboundTunnelId();
|
||||
if (outTunnel == null) {
|
||||
_log.error("No tunnels to send search out through! wtf!");
|
||||
getContext().jobQueue().addJob(new FailedJob(getContext(), router));
|
||||
return;
|
||||
}
|
||||
}
|
||||
TunnelId outTunnelId = outTunnel.getSendTunnelId(0);
|
||||
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": Sending leaseSet search to " + router.getIdentity().getHash().toBase64()
|
||||
@ -310,10 +320,9 @@ class SearchJob extends JobImpl {
|
||||
|
||||
SearchMessageSelector sel = new SearchMessageSelector(getContext(), router, _expiration, _state);
|
||||
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade, this);
|
||||
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId, router.getIdentity().getHash(),
|
||||
null, null, reply, new FailedJob(getContext(), router), sel,
|
||||
PER_PEER_TIMEOUT, SEARCH_PRIORITY);
|
||||
getContext().jobQueue().addJob(j);
|
||||
|
||||
getContext().messageRegistry().registerPending(sel, reply, new FailedJob(getContext(), router), PER_PEER_TIMEOUT);
|
||||
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnelId, router.getIdentity().getHash());
|
||||
}
|
||||
|
||||
/** we're searching for a router, so we can just send direct */
|
||||
@ -338,16 +347,8 @@ class SearchJob extends JobImpl {
|
||||
*
|
||||
* @return tunnel id (or null if none are found)
|
||||
*/
|
||||
private TunnelId getOutboundTunnelId() {
|
||||
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
|
||||
crit.setMaximumTunnelsRequired(1);
|
||||
crit.setMinimumTunnelsRequired(1);
|
||||
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);
|
||||
if (tunnelIds.size() <= 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (TunnelId)tunnelIds.get(0);
|
||||
private TunnelInfo getOutboundTunnelId() {
|
||||
return getContext().tunnelManager().selectOutboundTunnel();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -355,15 +356,8 @@ class SearchJob extends JobImpl {
|
||||
*
|
||||
* @return tunnel id (or null if none are found)
|
||||
*/
|
||||
private TunnelId getInboundTunnelId() {
|
||||
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
|
||||
crit.setMaximumTunnelsRequired(1);
|
||||
crit.setMinimumTunnelsRequired(1);
|
||||
List tunnelIds = getContext().tunnelManager().selectInboundTunnelIds(crit);
|
||||
if (tunnelIds.size() <= 0) {
|
||||
return null;
|
||||
}
|
||||
return (TunnelId)tunnelIds.get(0);
|
||||
private TunnelInfo getInboundTunnelId() {
|
||||
return getContext().tunnelManager().selectInboundTunnel();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -378,7 +372,7 @@ class SearchJob extends JobImpl {
|
||||
msg.setSearchKey(_state.getTarget());
|
||||
msg.setFrom(replyGateway.getIdentity().getHash());
|
||||
msg.setDontIncludePeers(_state.getAttempted());
|
||||
msg.setMessageExpiration(new Date(expiration));
|
||||
msg.setMessageExpiration(expiration);
|
||||
msg.setReplyTunnel(replyTunnelId);
|
||||
return msg;
|
||||
}
|
||||
@ -393,7 +387,7 @@ class SearchJob extends JobImpl {
|
||||
msg.setSearchKey(_state.getTarget());
|
||||
msg.setFrom(getContext().routerHash());
|
||||
msg.setDontIncludePeers(_state.getAttempted());
|
||||
msg.setMessageExpiration(new Date(expiration));
|
||||
msg.setMessageExpiration(expiration);
|
||||
msg.setReplyTunnel(null);
|
||||
return msg;
|
||||
}
|
||||
@ -583,6 +577,8 @@ class SearchJob extends JobImpl {
|
||||
|
||||
_facade.searchComplete(_state.getTarget());
|
||||
|
||||
handleDeferred(true);
|
||||
|
||||
resend();
|
||||
}
|
||||
|
||||
@ -605,6 +601,13 @@ class SearchJob extends JobImpl {
|
||||
* Search totally failed
|
||||
*/
|
||||
protected void fail() {
|
||||
if (isLocal()) {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error(getJobId() + ": why did we fail if the target is local?: " + _state.getTarget().toBase64(), new Exception("failure cause"));
|
||||
succeed();
|
||||
return;
|
||||
}
|
||||
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(getJobId() + ": Failed search for key " + _state.getTarget());
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
@ -613,13 +616,81 @@ class SearchJob extends JobImpl {
|
||||
if (_keepStats) {
|
||||
long time = getContext().clock().now() - _state.getWhenStarted();
|
||||
getContext().statManager().addRateData("netDb.failedTime", time, 0);
|
||||
_facade.fail(_state.getTarget());
|
||||
//_facade.fail(_state.getTarget());
|
||||
}
|
||||
if (_onFailure != null)
|
||||
getContext().jobQueue().addJob(_onFailure);
|
||||
|
||||
_facade.searchComplete(_state.getTarget());
|
||||
handleDeferred(false);
|
||||
}
|
||||
|
||||
public void addDeferred(Job onFind, Job onFail, long expiration, boolean isLease) {
|
||||
Search search = new Search(onFind, onFail, expiration, isLease);
|
||||
boolean ok = true;
|
||||
synchronized (_deferredSearches) {
|
||||
if (_deferredCleared)
|
||||
ok = false;
|
||||
else
|
||||
_deferredSearches.add(search);
|
||||
}
|
||||
|
||||
if (!ok) {
|
||||
// race between adding deferred and search completing
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Race deferred before searchCompleting? our onFind=" + _onSuccess + " new one: " + onFind);
|
||||
|
||||
// the following /shouldn't/ be necessary, but it doesnt hurt
|
||||
_facade.searchComplete(_state.getTarget());
|
||||
_facade.search(_state.getTarget(), onFind, onFail, expiration - getContext().clock().now(), isLease);
|
||||
}
|
||||
}
|
||||
|
||||
private void handleDeferred(boolean success) {
|
||||
List deferred = null;
|
||||
synchronized (_deferredSearches) {
|
||||
if (_deferredSearches.size() > 0) {
|
||||
deferred = new ArrayList(_deferredSearches);
|
||||
_deferredSearches.clear();
|
||||
}
|
||||
_deferredCleared = true;
|
||||
}
|
||||
if (deferred != null) {
|
||||
long now = getContext().clock().now();
|
||||
for (int i = 0; i < deferred.size(); i++) {
|
||||
Search cur = (Search)deferred.get(i);
|
||||
if (cur.getExpiration() < now)
|
||||
getContext().jobQueue().addJob(cur.getOnFail());
|
||||
else if (success)
|
||||
getContext().jobQueue().addJob(cur.getOnFind());
|
||||
else // failed search, not yet expired, but it took too long to reasonably continue
|
||||
getContext().jobQueue().addJob(cur.getOnFail());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private class Search {
|
||||
private Job _onFind;
|
||||
private Job _onFail;
|
||||
private long _expiration;
|
||||
private boolean _isLease;
|
||||
|
||||
public Search(Job onFind, Job onFail, long expiration, boolean isLease) {
|
||||
_onFind = onFind;
|
||||
_onFail = onFail;
|
||||
_expiration = expiration;
|
||||
_isLease = isLease;
|
||||
}
|
||||
public Job getOnFind() { return _onFind; }
|
||||
public Job getOnFail() { return _onFail; }
|
||||
public long getExpiration() { return _expiration; }
|
||||
public boolean getIsLease() { return _isLease; }
|
||||
}
|
||||
|
||||
public String getName() { return "Kademlia NetDb Search"; }
|
||||
|
||||
public String toString() {
|
||||
return super.toString() + " started "
|
||||
+ DataHelper.formatDuration((getContext().clock().now() - _startedOn)) + " ago";
|
||||
}
|
||||
}
|
||||
|
@ -42,14 +42,22 @@ class SearchMessageSelector implements MessageSelector {
|
||||
}
|
||||
|
||||
public boolean continueMatching() {
|
||||
boolean expired = _context.clock().now() > _exp;
|
||||
if (expired) return false;
|
||||
|
||||
// so we dont drop outstanding replies after receiving the value
|
||||
// > 1 to account for the 'current' match
|
||||
if (_state.getPending().size() > 1)
|
||||
return true;
|
||||
|
||||
if (_found) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("[" + _id + "] Dont continue matching! looking for a reply from "
|
||||
+ _peer + " with regards to " + _state.getTarget());
|
||||
return false;
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
long now = _context.clock().now();
|
||||
return now < _exp;
|
||||
}
|
||||
public long getExpiration() { return _exp; }
|
||||
public boolean isMatch(I2NPMessage message) {
|
||||
|
@ -25,8 +25,6 @@ import net.i2p.router.JobImpl;
|
||||
import net.i2p.router.ReplyJob;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.router.TunnelInfo;
|
||||
import net.i2p.router.TunnelSelectionCriteria;
|
||||
import net.i2p.router.message.SendTunnelMessageJob;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
class StoreJob extends JobImpl {
|
||||
@ -54,7 +52,7 @@ class StoreJob extends JobImpl {
|
||||
private final static int STORE_PRIORITY = 100;
|
||||
|
||||
/** how long we allow for an ACK to take after a store */
|
||||
private final static long STORE_TIMEOUT_MS = 10*1000;
|
||||
private final static int STORE_TIMEOUT_MS = 10*1000;
|
||||
|
||||
/**
|
||||
* Create a new search for the routingKey specified
|
||||
@ -189,7 +187,7 @@ class StoreJob extends JobImpl {
|
||||
msg.setLeaseSet((LeaseSet)_state.getData());
|
||||
else
|
||||
throw new IllegalArgumentException("Storing an unknown data type! " + _state.getData());
|
||||
msg.setMessageExpiration(new Date(getContext().clock().now() + _timeoutMs));
|
||||
msg.setMessageExpiration(getContext().clock().now() + _timeoutMs);
|
||||
|
||||
if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) {
|
||||
// don't send it to ourselves
|
||||
@ -212,19 +210,19 @@ class StoreJob extends JobImpl {
|
||||
private void sendStoreThroughGarlic(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
|
||||
long token = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
|
||||
|
||||
TunnelId replyTunnelId = selectInboundTunnel();
|
||||
if (replyTunnelId == null) {
|
||||
TunnelInfo replyTunnel = selectInboundTunnel();
|
||||
if (replyTunnel == null) {
|
||||
_log.error("No reply inbound tunnels available!");
|
||||
return;
|
||||
}
|
||||
TunnelInfo replyTunnel = getContext().tunnelManager().getTunnelInfo(replyTunnelId);
|
||||
TunnelId replyTunnelId = replyTunnel.getReceiveTunnelId(0);
|
||||
if (replyTunnel == null) {
|
||||
_log.error("No reply inbound tunnels available!");
|
||||
return;
|
||||
}
|
||||
msg.setReplyToken(token);
|
||||
msg.setReplyTunnel(replyTunnelId);
|
||||
msg.setReplyGateway(replyTunnel.getThisHop());
|
||||
msg.setReplyGateway(replyTunnel.getPeer(0));
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": send(dbStore) w/ token expected " + token);
|
||||
@ -235,19 +233,18 @@ class StoreJob extends JobImpl {
|
||||
FailedJob onFail = new FailedJob(getContext(), peer);
|
||||
StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration);
|
||||
|
||||
TunnelId outTunnelId = selectOutboundTunnel();
|
||||
if (outTunnelId != null) {
|
||||
TunnelInfo outTunnel = selectOutboundTunnel();
|
||||
if (outTunnel != null) {
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug(getJobId() + ": Sending tunnel message out " + outTunnelId + " to "
|
||||
// + peer.getIdentity().getHash().toBase64());
|
||||
TunnelId targetTunnelId = null; // not needed
|
||||
Job onSend = null; // not wanted
|
||||
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId,
|
||||
peer.getIdentity().getHash(),
|
||||
targetTunnelId, onSend, onReply,
|
||||
onFail, selector, STORE_TIMEOUT_MS,
|
||||
STORE_PRIORITY);
|
||||
getContext().jobQueue().addJob(j);
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("sending store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + msg);
|
||||
getContext().messageRegistry().registerPending(selector, onReply, onFail, STORE_TIMEOUT_MS);
|
||||
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0), null, peer.getIdentity().getHash());
|
||||
} else {
|
||||
if (_log.shouldLog(Log.ERROR))
|
||||
_log.error("No outbound tunnels to send a dbStore out!");
|
||||
@ -255,36 +252,12 @@ class StoreJob extends JobImpl {
|
||||
}
|
||||
}
|
||||
|
||||
private TunnelId selectOutboundTunnel() {
|
||||
TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
|
||||
criteria.setAnonymityPriority(80);
|
||||
criteria.setLatencyPriority(50);
|
||||
criteria.setReliabilityPriority(20);
|
||||
criteria.setMaximumTunnelsRequired(1);
|
||||
criteria.setMinimumTunnelsRequired(1);
|
||||
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(criteria);
|
||||
if (tunnelIds.size() <= 0) {
|
||||
_log.error("No outbound tunnels?!");
|
||||
return null;
|
||||
} else {
|
||||
return (TunnelId)tunnelIds.get(0);
|
||||
}
|
||||
private TunnelInfo selectOutboundTunnel() {
|
||||
return getContext().tunnelManager().selectOutboundTunnel();
|
||||
}
|
||||
|
||||
private TunnelId selectInboundTunnel() {
|
||||
TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
|
||||
criteria.setAnonymityPriority(80);
|
||||
criteria.setLatencyPriority(50);
|
||||
criteria.setReliabilityPriority(20);
|
||||
criteria.setMaximumTunnelsRequired(1);
|
||||
criteria.setMinimumTunnelsRequired(1);
|
||||
List tunnelIds = getContext().tunnelManager().selectInboundTunnelIds(criteria);
|
||||
if (tunnelIds.size() <= 0) {
|
||||
_log.error("No inbound tunnels?!");
|
||||
return null;
|
||||
} else {
|
||||
return (TunnelId)tunnelIds.get(0);
|
||||
}
|
||||
private TunnelInfo selectInboundTunnel() {
|
||||
return getContext().tunnelManager().selectInboundTunnel();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -335,7 +308,8 @@ class StoreJob extends JobImpl {
|
||||
}
|
||||
public void runJob() {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn(StoreJob.this.getJobId() + ": Peer " + _peer.getIdentity().getHash().toBase64() + " timed out");
|
||||
_log.warn(StoreJob.this.getJobId() + ": Peer " + _peer.getIdentity().getHash().toBase64()
|
||||
+ " timed out sending " + _state.getTarget());
|
||||
_state.replyTimeout(_peer.getIdentity().getHash());
|
||||
getContext().profileManager().dbStoreFailed(_peer.getIdentity().getHash());
|
||||
|
||||
@ -362,8 +336,8 @@ class StoreJob extends JobImpl {
|
||||
* Send totally failed
|
||||
*/
|
||||
private void fail() {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(getJobId() + ": Failed sending key " + _state.getTarget());
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn(getJobId() + ": Failed sending key " + _state.getTarget());
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": State of failed send: " + _state, new Exception("Who failed me?"));
|
||||
if (_onFailure != null)
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user