propagate from branch 'i2p.i2p.zzz.test3' (head 459a56e53d8d694591071574e87474b5e95d6494)
to branch 'i2p.i2p' (head ffa1aab7aa8e75d75c183fd6f76140f7d840a6ae)
@ -24,7 +24,7 @@ mkdir -p $PKG
# es: usr/local
NAME=i2p-base
VERSION=0.0.1
BUILD=1sim
BUILD=1sponge
ARCH=noarch
INSTALL_DIR=opt
cd $PKG
@ -38,5 +38,9 @@ sed "s|directory|/$INSTALL_DIR/i2p/|g" $CWD/doinst.sh > $PKG/install/doinst.sh
cat $CWD/slack-desc > $PKG/install/slack-desc

cd $PKG
requiredbuilder -v -y -s $CWD $PKG
#
# Not really that important to exec this.
#requiredbuilder -v -y -s $CWD $PKG
#
cat $CWD/slack-required > $PKG/install/slack-required
makepkg -l y -c n $CWD/${NAME}-$VERSION-$ARCH-$BUILD.tgz

@ -15,7 +15,7 @@
# It's suggested to subscribe to various dns host, like i2host.i2p
# For any additional information, visit i2host.i2p and forum.i2p

BUILD=1sim
BUILD=1sponge

# put here installation dir, without first and last /
# eg: usr/local
@ -113,5 +113,11 @@ sed "s|directory|/$INSTALL_DIR/i2p/|g" $CWD/doinst.sh > $PKG/install/doinst.sh
cat $CWD/slack-desc > $PKG/install/slack-desc

cd $PKG
requiredbuilder -v -y -s $CWD $PKG
#
# requiredbuilder fucks up REALLY bad, and thinks java is perl?!
# It also did not catch the shell requirements! BOOOOOOOOOOO! HISSSSSSSS!
#
#requiredbuilder -v -y -s $CWD $PKG
#
cat $CWD/slack-required > $PKG/install/slack-required
makepkg -l y -c n $CWD/${NAME}-$VERSION-$ARCH-$BUILD.tgz

@ -1,2 +1,4 @@
glibc >= 2.7-i486-17 | glibc-solibs >= 2.7-i486-17
perl >= 5.10.0-i486-1
jre >= 5
i2p-base >= 0.0.1
bash >= 3.1.017

@ -85,7 +85,7 @@ public class I2PSnarkServlet extends HttpServlet {
|
||||
out.write("<meta http-equiv=\"refresh\" content=\"60;" + req.getRequestURI() + peerString + "\">\n");
|
||||
out.write(HEADER);
|
||||
out.write("</head><body>");
|
||||
out.write("<center><div class=\"page\">");
|
||||
out.write("<center>");
|
||||
out.write("<div class=\"snarknavbar\"><a href=\"" + req.getRequestURI() + peerString + "\" title=\"Refresh page\" class=\"snarkRefresh\">I2PSnark</a> <a href=\"http://forum.i2p/viewforum.php?f=21\" class=\"snarkRefresh\" target=\"_blank\">Forum</a>\n");
|
||||
Map trackers = _manager.getTrackers();
|
||||
for (Iterator iter = trackers.entrySet().iterator(); iter.hasNext(); ) {
|
||||
@ -99,7 +99,7 @@ public class I2PSnarkServlet extends HttpServlet {
|
||||
out.write(" <a href=\"" + baseURL + "\" class=\"snarkRefresh\" target=\"_blank\">" + name + "</a>");
|
||||
}
|
||||
out.write("</div>\n");
|
||||
out.write("<div class=\"mainsection\"><div class=\"snarkMessages\"><table><tr><td align=\"left\"><pre>");
|
||||
out.write("<div class=\"page\"><div class=\"mainsection\"><div class=\"snarkMessages\"><table><tr><td align=\"left\"><pre>");
|
||||
List msgs = _manager.getMessages();
|
||||
for (int i = msgs.size()-1; i >= 0; i--) {
|
||||
String msg = (String)msgs.get(i);
|
||||
@ -498,7 +498,7 @@ public class I2PSnarkServlet extends HttpServlet {
|
||||
|
||||
if (remaining == 0)
|
||||
out.write("<a href=\"" + _manager.linkPrefix() + snark.meta.getName()
|
||||
+ "\" title=\"Click to access completed downloaded..\">");
|
||||
+ "\" title=\"View file\">");
|
||||
out.write(filename);
|
||||
if (remaining == 0)
|
||||
out.write("</a>");
|
||||
@ -573,7 +573,7 @@ public class I2PSnarkServlet extends HttpServlet {
|
||||
out.write("<tr class=\"" + rowClass + "\">");
|
||||
out.write("<td align=\"center\" class=\"snarkTorrentStatus " + rowClass + "\">");
|
||||
out.write("</td>\n\t");
|
||||
out.write("<td align=\"center\" class=\"snarkTorrentStatus " + rowClass + "\">");
|
||||
out.write("<td align=\"left\" class=\"snarkTorrentStatus " + rowClass + "\">");
|
||||
String ch = peer.toString().substring(0, 4);
|
||||
String client;
|
||||
if ("AwMD".equals(ch))
|
||||
@ -592,7 +592,7 @@ public class I2PSnarkServlet extends HttpServlet {
|
||||
client = "Robert";
|
||||
else
|
||||
client = "Unknown (" + ch + ')';
|
||||
out.write("<font size=-1>" + client + "</font> " + peer.toString().substring(5, 9) + "");
|
||||
out.write("" + client + " " + peer.toString().substring(5, 9) + "");
|
||||
if (showDebug)
|
||||
out.write(" inactive " + (peer.getInactiveTime() / 1000) + "s");
|
||||
out.write("</td>\n\t");
|
||||
@ -601,12 +601,12 @@ public class I2PSnarkServlet extends HttpServlet {
|
||||
out.write("<td align=\"right\" class=\"snarkTorrentStatus " + rowClass + "\">");
|
||||
float pct = (float) (100.0 * (float) peer.completed() / snark.meta.getPieces());
|
||||
if (pct == 100.0)
|
||||
out.write("<font size=-1>Seed</font>");
|
||||
out.write("Seed");
|
||||
else {
|
||||
String ps = String.valueOf(pct);
|
||||
if (ps.length() > 5)
|
||||
ps = ps.substring(0, 5);
|
||||
out.write("<font size=-1>" + ps + "%</font>");
|
||||
out.write("" + ps + "%");
|
||||
}
|
||||
out.write("</td>\n\t");
|
||||
out.write("<td class=\"snarkTorrentStatus " + rowClass + "\">");
|
||||
@ -615,14 +615,14 @@ public class I2PSnarkServlet extends HttpServlet {
|
||||
if (remaining > 0) {
|
||||
if (peer.isInteresting() && !peer.isChoked()) {
|
||||
out.write("<font color=#008000>");
|
||||
out.write("<font size=-1>" + formatSize(peer.getDownloadRate()) + "ps</font></font>");
|
||||
out.write("" + formatSize(peer.getDownloadRate()) + "ps</font>");
|
||||
} else {
|
||||
out.write("<font color=#a00000><font size=-1><a title=\"");
|
||||
out.write("<font color=#a00000><a title=\"");
|
||||
if (!peer.isInteresting())
|
||||
out.write("Uninteresting\">");
|
||||
else
|
||||
out.write("Choked\">");
|
||||
out.write(formatSize(peer.getDownloadRate()) + "ps</a></font></font>");
|
||||
out.write(formatSize(peer.getDownloadRate()) + "ps</a></font>");
|
||||
}
|
||||
}
|
||||
out.write("</td>\n\t");
|
||||
@ -630,14 +630,14 @@ public class I2PSnarkServlet extends HttpServlet {
|
||||
if (pct != 100.0) {
|
||||
if (peer.isInterested() && !peer.isChoking()) {
|
||||
out.write("<font color=#008000>");
|
||||
out.write("<font size=-1>" + formatSize(peer.getUploadRate()) + "ps</font></font>");
|
||||
out.write("" + formatSize(peer.getUploadRate()) + "ps</font>");
|
||||
} else {
|
||||
out.write("<font color=#a00000><font size=-1><a title=\"");
|
||||
out.write("<font color=#a00000><a title=\"");
|
||||
if (!peer.isInterested())
|
||||
out.write("Uninterested\">");
|
||||
else
|
||||
out.write("Choking\">");
|
||||
out.write(formatSize(peer.getUploadRate()) + "ps</a></font></font>");
|
||||
out.write(formatSize(peer.getUploadRate()) + "ps</a></font>");
|
||||
}
|
||||
}
|
||||
out.write("</td>\n\t");
|
||||
|
@ -13,7 +13,7 @@
%>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>I2PTunnel Webmanager - Edit</title>
<title>I2P Tunnel Manager - Edit</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<meta http-equiv="Content-Type" content="application/xhtml+xml; charset=UTF-8" />

@ -13,7 +13,7 @@
%>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>I2PTunnel Webmanager - Edit</title>
<title>I2P Tunnel Manager - Edit</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<meta http-equiv="Content-Type" content="application/xhtml+xml; charset=UTF-8" />

@ -16,9 +16,9 @@ net.i2p.client.streaming.I2PServerSocket#accept} method, which will provide an
application wants to create a new stream to a peer, it should do so with the
appropriate {@link net.i2p.client.streaming.I2PSocketManager#connect} call.</p>

<p>There is a simple pair of demo applications available as well - {@link
net.i2p.client.streaming.StreamSinkServer} listens to a destination and dumps
the data from all sockets it accepts to individual files, while {@link
net.i2p.client.streaming.StreamSinkClient} connects to a particular destination
<p>There is a simple pair of demo applications available as well -
net.i2p.client.streaming.StreamSinkServer listens to a destination and dumps
the data from all sockets it accepts to individual files, while
net.i2p.client.streaming.StreamSinkClient connects to a particular destination
and sends a specific amount of random data then disconnects.</p>
</body></html>

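The package documentation above points at the two entry points of the streaming API: I2PServerSocket#accept for inbound streams and I2PSocketManager#connect for outbound ones. A minimal sketch of the accept side, assuming the usual I2PSocketManagerFactory bootstrap against a local router (the factory and destroySocketManager() come from the public streaming API, not from this diff; the class name is made up):

import net.i2p.client.streaming.I2PSocket;
import net.i2p.client.streaming.I2PSocketManager;
import net.i2p.client.streaming.I2PSocketManagerFactory;

public class StreamingAcceptSketch {
    public static void main(String[] args) throws Exception {
        // Build a socket manager over a fresh I2P session (assumes a router on localhost).
        I2PSocketManager mgr = I2PSocketManagerFactory.createManager();
        if (mgr == null)
            throw new IllegalStateException("Could not reach the local router");
        // Block until a peer opens a stream to our destination, then send a greeting.
        I2PSocket sock = mgr.getServerSocket().accept();
        if (sock != null) {
            sock.getOutputStream().write("hello from the sink\n".getBytes());
            sock.close();
        }
        mgr.destroySocketManager();
    }
}

The outbound direction is symmetric: obtain a peer Destination and call mgr.connect(dest) to get an I2PSocket.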
apps/routerconsole/jsp/debug.jsp (new file, 31 lines)
@ -0,0 +1,31 @@
<%@page contentType="text/html"%>
<%@page pageEncoding="UTF-8"%>
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head><title>I2P Router Console - Debug</title>
<%@include file="css.jsp" %>
</head><body>
<%@include file="summary.jsp" %>
<h1>Router SKM</h1>
<div class="main" id="main">
<%
/*
 * Quick and easy place to put debugging stuff
 */
net.i2p.router.RouterContext ctx = (net.i2p.router.RouterContext) net.i2p.I2PAppContext.getGlobalContext();

/*
 * Print out the status for all the SessionKeyManagers
 */

ctx.sessionKeyManager().renderStatusHTML(out);
java.util.Set<net.i2p.data.Destination> clients = ctx.clientManager().listClients();
for (net.i2p.data.Destination dest : clients) {
net.i2p.data.Hash h = dest.calculateHash();
net.i2p.crypto.SessionKeyManager skm = ctx.clientManager().getClientSessionKeyManager(h);
if (skm != null) {
out.print("<h1>" + h.toBase64().substring(0,6) + " SKM</h1>");
skm.renderStatusHTML(out);
}
}
%>
</div></body></html>
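The new debug.jsp drives SessionKeyManager.renderStatusHTML(Writer) from the console. The same call can be exercised outside a JSP; a minimal sketch, assuming i2p.jar on the classpath and a live global context (the class name SkmStatusDump is illustrative, not part of the code base):

import java.io.IOException;
import java.io.StringWriter;
import net.i2p.I2PAppContext;

public class SkmStatusDump {
    public static void main(String[] args) throws IOException {
        // The global context; inside the router this is actually a RouterContext.
        I2PAppContext ctx = I2PAppContext.getGlobalContext();
        StringWriter buf = new StringWriter();
        // renderStatusHTML(Writer) is the signature introduced by this change set.
        ctx.sessionKeyManager().renderStatusHTML(buf);
        System.out.println(buf.toString());
    }
}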
@ -12,7 +12,7 @@
}
// If it can't find the iframe or viewtheme.jsp I wonder if the whole thing blows up...
%>
<html><head><title>I2P Router Console</title>
<html><head><title>I2P Router Console - Page Not Found</title>
<%@include file="css.jsp" %>
</head><body>
<%
@ -22,6 +22,7 @@ if (System.getProperty("router.consoleNonce") == null) {
%>
<%@include file="summary.jsp" %>
<h1><%=ERROR_CODE%> <%=ERROR_MESSAGE%></h1>
<div class="warning" id="warning">
The Router Console page <%=ERROR_URI%> was not found.
<div class="sorry" id="warning">
Sorry! You appear to be requesting a non-existent Router Console page or resource.<hr>
Error 404: <%=ERROR_URI%> not found.
</div></body></html>

@ -176,4 +176,4 @@ client applications can be found on our <a href="http://www.i2p2.i2p/download">d

<p>A more complete list of changes can be found
in the history.txt file in your i2p directory.
</p><br></div></body></html>
</p><hr></div></body></html>

@ -124,10 +124,12 @@ public abstract class SAMHandler implements Runnable {
* @return True if the string was successfully written, false otherwise
*/
protected final boolean writeString(String str) {
synchronized (socketWLock) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending the client: [" + str + "]");
return writeString(str, socket);
}
}

public static boolean writeString(String str, SocketChannel out)
{

@ -354,6 +354,7 @@ public class Connection {
|
||||
*/
|
||||
}
|
||||
|
||||
/*********
|
||||
private class PingNotifier implements ConnectionManager.PingNotifier {
|
||||
private long _startedPingOn;
|
||||
public PingNotifier() {
|
||||
@ -367,6 +368,7 @@ public class Connection {
|
||||
_options.updateRTT((int)time*2);
|
||||
}
|
||||
}
|
||||
*********/
|
||||
|
||||
List ackPackets(long ackThrough, long nacks[]) {
|
||||
if (ackThrough < _highestAckedThrough) {
|
||||
@ -548,20 +550,21 @@ public class Connection {
|
||||
killOutstandingPackets();
|
||||
}
|
||||
|
||||
/** ignore tag issues */
|
||||
private void killOutstandingPackets() {
|
||||
boolean tagsCancelled = false;
|
||||
//boolean tagsCancelled = false;
|
||||
synchronized (_outboundPackets) {
|
||||
for (Iterator iter = _outboundPackets.values().iterator(); iter.hasNext(); ) {
|
||||
PacketLocal pl = (PacketLocal)iter.next();
|
||||
if ( (pl.getTagsSent() != null) && (pl.getTagsSent().size() > 0) )
|
||||
tagsCancelled = true;
|
||||
//if ( (pl.getTagsSent() != null) && (pl.getTagsSent().size() > 0) )
|
||||
// tagsCancelled = true;
|
||||
pl.cancelled();
|
||||
}
|
||||
_outboundPackets.clear();
|
||||
_outboundPackets.notifyAll();
|
||||
}
|
||||
if (tagsCancelled)
|
||||
_context.sessionKeyManager().failTags(_remotePeer.getPublicKey());
|
||||
//if (tagsCancelled)
|
||||
// _context.sessionKeyManager().failTags(_remotePeer.getPublicKey());
|
||||
}
|
||||
|
||||
private class DisconnectEvent implements SimpleTimer.TimedEvent {
|
||||
@ -1140,12 +1143,12 @@ public class Connection {
|
||||
|
||||
// in case things really suck, the other side may have lost thier
|
||||
// session tags (e.g. they restarted), so jump back to ElGamal.
|
||||
int failTagsAt = _options.getMaxResends() - 2;
|
||||
if ( (newWindowSize == 1) && (numSends == failTagsAt) ) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Optimistically failing tags at resend " + numSends);
|
||||
_context.sessionKeyManager().failTags(_remotePeer.getPublicKey());
|
||||
}
|
||||
//int failTagsAt = _options.getMaxResends() - 2;
|
||||
//if ( (newWindowSize == 1) && (numSends == failTagsAt) ) {
|
||||
// if (_log.shouldLog(Log.WARN))
|
||||
// _log.warn("Optimistically failing tags at resend " + numSends);
|
||||
// _context.sessionKeyManager().failTags(_remotePeer.getPublicKey());
|
||||
//}
|
||||
|
||||
if (numSends - 1 > _options.getMaxResends()) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
|
@ -349,24 +349,35 @@ public class ConnectionManager {
return new HashSet(_connectionByInboundId.values());
}
}

/** blocking */
public boolean ping(Destination peer, long timeoutMs) {
return ping(peer, timeoutMs, true);
return ping(peer, timeoutMs, true, null);
}
public boolean ping(Destination peer, long timeoutMs, boolean blocking) {
return ping(peer, timeoutMs, blocking, null, null, null);
return ping(peer, timeoutMs, blocking, null);
}

/**
* @deprecated I2PSession ignores tags, use non-tag variant
* @param keyToUse ignored
* @param tagsToSend ignored
*/
public boolean ping(Destination peer, long timeoutMs, boolean blocking, SessionKey keyToUse, Set tagsToSend, PingNotifier notifier) {
return ping(peer, timeoutMs, blocking, notifier);
}

public boolean ping(Destination peer, long timeoutMs, boolean blocking, PingNotifier notifier) {
Long id = new Long(_context.random().nextLong(Packet.MAX_STREAM_ID-1)+1);
PacketLocal packet = new PacketLocal(_context, peer);
packet.setSendStreamId(id.longValue());
packet.setFlag(Packet.FLAG_ECHO);
packet.setFlag(Packet.FLAG_SIGNATURE_INCLUDED);
packet.setOptionalFrom(_session.getMyDestination());
if ( (keyToUse != null) && (tagsToSend != null) ) {
packet.setKeyUsed(keyToUse);
packet.setTagsSent(tagsToSend);
}
//if ( (keyToUse != null) && (tagsToSend != null) ) {
// packet.setKeyUsed(keyToUse);
// packet.setTagsSent(tagsToSend);
//}

PingRequest req = new PingRequest(peer, packet, notifier);

@ -435,7 +446,7 @@ public class ConnectionManager {
}
public void pong() {
_log.debug("Ping successful");
_context.sessionKeyManager().tagsDelivered(_peer.getPublicKey(), _packet.getKeyUsed(), _packet.getTagsSent());
//_context.sessionKeyManager().tagsDelivered(_peer.getPublicKey(), _packet.getKeyUsed(), _packet.getTagsSent());
synchronized (ConnectionManager.PingRequest.this) {
_ponged = true;
ConnectionManager.PingRequest.this.notifyAll();
@ -263,12 +263,12 @@ public class ConnectionPacketHandler {
numResends++;

// ACK the tags we delivered so we can use them
if ( (p.getKeyUsed() != null) && (p.getTagsSent() != null)
&& (p.getTagsSent().size() > 0) ) {
_context.sessionKeyManager().tagsDelivered(p.getTo().getPublicKey(),
p.getKeyUsed(),
p.getTagsSent());
}
//if ( (p.getKeyUsed() != null) && (p.getTagsSent() != null)
// && (p.getTagsSent().size() > 0) ) {
// _context.sessionKeyManager().tagsDelivered(p.getTo().getPublicKey(),
// p.getKeyUsed(),
// p.getTagsSent());
//}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Packet acked after " + p.getAckTime() + "ms: " + p);
}

@ -47,11 +47,31 @@ public class PacketLocal extends Packet implements MessageOutputStream.WriteStat
public Destination getTo() { return _to; }
public void setTo(Destination to) { _to = to; }

/**
* @deprecated should always return null
*/
public SessionKey getKeyUsed() { return _keyUsed; }
public void setKeyUsed(SessionKey key) { _keyUsed = key; }

/**
* @deprecated I2PSession throws out the tags
*/
public void setKeyUsed(SessionKey key) {
if (key != null)
_log.error("Who is sending tags thru the streaming lib?");
_keyUsed = key;
}

/**
* @deprecated should always return null or an empty set
*/
public Set getTagsSent() { return _tagsSent; }

/**
* @deprecated I2PSession throws out the tags
*/
public void setTagsSent(Set tags) {
if (tags != null && tags.size() > 0)
_log.error("Who is sending tags thru the streaming lib? " + tags.size());
if ( (_tagsSent != null) && (_tagsSent.size() > 0) && (tags.size() > 0) ) {
//int old = _tagsSent.size();
//_tagsSent.addAll(tags);

@ -36,16 +36,18 @@ public class PacketQueue {
|
||||
|
||||
/**
|
||||
* Add a new packet to be sent out ASAP
|
||||
*
|
||||
* keys and tags disabled since dropped in I2PSession
|
||||
*/
|
||||
public void enqueue(PacketLocal packet) {
|
||||
packet.prepare();
|
||||
|
||||
SessionKey keyUsed = packet.getKeyUsed();
|
||||
if (keyUsed == null)
|
||||
keyUsed = new SessionKey();
|
||||
Set tagsSent = packet.getTagsSent();
|
||||
if (tagsSent == null)
|
||||
tagsSent = new HashSet(0);
|
||||
//SessionKey keyUsed = packet.getKeyUsed();
|
||||
//if (keyUsed == null)
|
||||
// keyUsed = new SessionKey();
|
||||
//Set tagsSent = packet.getTagsSent();
|
||||
//if (tagsSent == null)
|
||||
// tagsSent = new HashSet(0);
|
||||
|
||||
// cache this from before sendMessage
|
||||
String conStr = null;
|
||||
@ -92,13 +94,19 @@ public class PacketQueue {
|
||||
// I2PSessionImpl2
|
||||
//sent = _session.sendMessage(packet.getTo(), buf, 0, size, keyUsed, tagsSent, expires);
|
||||
// I2PSessionMuxedImpl
|
||||
sent = _session.sendMessage(packet.getTo(), buf, 0, size, keyUsed, tagsSent, expires,
|
||||
//sent = _session.sendMessage(packet.getTo(), buf, 0, size, keyUsed, tagsSent, expires,
|
||||
// I2PSession.PROTO_STREAMING, I2PSession.PORT_UNSPECIFIED, I2PSession.PORT_UNSPECIFIED);
|
||||
// I2PSessionMuxedImpl no tags
|
||||
sent = _session.sendMessage(packet.getTo(), buf, 0, size, null, null, expires,
|
||||
I2PSession.PROTO_STREAMING, I2PSession.PORT_UNSPECIFIED, I2PSession.PORT_UNSPECIFIED);
|
||||
else
|
||||
// I2PSessionImpl2
|
||||
//sent = _session.sendMessage(packet.getTo(), buf, 0, size, keyUsed, tagsSent, 0);
|
||||
// I2PSessionMuxedImpl
|
||||
sent = _session.sendMessage(packet.getTo(), buf, 0, size, keyUsed, tagsSent,
|
||||
//sent = _session.sendMessage(packet.getTo(), buf, 0, size, keyUsed, tagsSent,
|
||||
// I2PSession.PROTO_STREAMING, I2PSession.PORT_UNSPECIFIED, I2PSession.PORT_UNSPECIFIED);
|
||||
// I2PSessionMuxedImpl no tags
|
||||
sent = _session.sendMessage(packet.getTo(), buf, 0, size, null, null,
|
||||
I2PSession.PROTO_STREAMING, I2PSession.PORT_UNSPECIFIED, I2PSession.PORT_UNSPECIFIED);
|
||||
end = _context.clock().now();
|
||||
|
||||
@ -129,13 +137,11 @@ public class PacketQueue {
|
||||
if (c != null) // handle race on b0rk
|
||||
c.disconnect(false);
|
||||
} else {
|
||||
packet.setKeyUsed(keyUsed);
|
||||
packet.setTagsSent(tagsSent);
|
||||
//packet.setKeyUsed(keyUsed);
|
||||
//packet.setTagsSent(tagsSent);
|
||||
packet.incrementSends();
|
||||
if (_log.shouldLog(Log.DEBUG)) {
|
||||
String msg = "SEND " + packet + (tagsSent.size() > 0
|
||||
? " with " + tagsSent.size() + " tags"
|
||||
: "")
|
||||
String msg = "SEND " + packet
|
||||
+ " send # " + packet.getNumSends()
|
||||
+ " sendTime: " + (end-begin)
|
||||
+ " con: " + conStr;
|
||||
|
@ -73,4 +73,4 @@ Website files to change:
release-x.y.z.html (new)
Sync with mtn.i2p2.i2p

Announce on #i2p, forum.i2p
Announce on #i2p, forum.i2p, freshmeat.net, launchpad.net

@ -15,21 +15,21 @@

mkdir -p lib/
mkdir -p bin/local
VER=4.2.4
VER=4.3.1

if [ "$1" != "dynamic" -a ! -d gmp-$VER ]
then
TAR=gmp-$VER.tar.bz2
TAR=gmp-$VER.tar.lzma
if [ ! -f $TAR ]
then
echo "GMP tarball $TAR not found. You must download it from http://gmplib.org/"
exit 1
echo "Downloading ftp://ftp.gmplib.org/pub/gmp-4.3.1/gmp-4.3.1.tar.lzma"
wget ftp://ftp.gmplib.org/pub/gmp-4.3.1/gmp-4.3.1.tar.lzma
fi

echo "Building the jbigi library with GMP Version $VER"

echo "Extracting GMP..."
tar -xjf gmp-$VER.tar.bz2
tar -xf gmp-$VER.tar.lzma --lzma
fi

cd bin/local
@ -42,7 +42,7 @@ then
# --with-pic is required for static linking
../../gmp-$VER/configure --with-pic;;
*)
../../gmp-$VER/configure;;
../../gmp-$VER/configure --with-pic;;
esac
make
sh ../../build_jbigi.sh static
@ -54,7 +54,7 @@ cp *jbigi???* ../../lib/
echo 'Library copied to lib/'
cd ../..

I2P=~/i2p
I2P=~/i2p/i2p
if [ ! -f $I2P/lib/i2p.jar ]
then
echo "I2P installation not found in $I2P - correct \$I2P definition in script to run speed test"

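The script above builds the GMP-backed jbigi library and copies it into lib/. Whether the router actually loads the native library can be checked from Java; a minimal sketch, assuming i2p.jar on the classpath (NativeBigInteger.isNative() is the long-standing check in net.i2p.util, not something introduced by this diff):

import net.i2p.util.NativeBigInteger;

public class JbigiCheck {
    public static void main(String[] args) {
        // Reports whether the native jbigi/GMP library was found and loaded;
        // falls back to pure-Java BigInteger math when false.
        System.out.println("jbigi native: " + NativeBigInteger.isNative());
    }
}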
@ -37,13 +37,13 @@ FreeBSD*)
Linux*)
COMPILEFLAGS="-fPIC -Wall"
INCLUDES="-I. -Iinclude -I$JAVA_HOME/include -I$JAVA_HOME/include/linux"
LINKFLAGS="-shared -static -static-libgcc -Wl,-soname,libjcpuid-x86-linux.so"
LINKFLAGS="-shared -Wl,-soname,libjcpuid-x86-linux.so"
LIBFILE="lib/freenet/support/CPUInformation/libjcpuid-x86-linux.so";;
esac

echo "Compiling C code..."
rm -f $LIBFILE
$CC $LINKFLAGS $INCLUDES src/*.c -o $LIBFILE
$CC $COMPILEFLAGS $LINKFLAGS $INCLUDES src/*.c -o $LIBFILE
strip $LIBFILE
echo Built $LIBFILE

@ -388,9 +388,13 @@ public class I2PAppContext {
* The session key manager which coordinates the sessionKey / sessionTag
* data. This component allows transparent operation of the
* ElGamal/AES+SessionTag algorithm, and contains all of the session tags
* for one particular application. If you want to seperate multiple apps
* to have their own sessionTags and sessionKeys, they should use different
* I2PAppContexts, and hence, different sessionKeyManagers.
* for one particular application.
*
* This is deprecated for client use, it should be used only by the router
* as its own key manager. Not that clients are doing end-to-end crypto anyway.
*
* For client crypto within the router,
* use RouterContext.clientManager.getClientSessionKeyManager(dest)
*
*/
public SessionKeyManager sessionKeyManager() {

@ -93,6 +93,10 @@ class I2CPMessageProducer {
/**
* Package up and send the payload to the router for delivery
*
* @param tag unused - no end-to-end crypto
* @param tags unused - no end-to-end crypto
* @param key unused - no end-to-end crypto
* @param newKey unused - no end-to-end crypto
*/
public void sendMessage(I2PSessionImpl session, Destination dest, long nonce, byte[] payload, SessionTag tag,
SessionKey key, Set tags, SessionKey newKey, long expires) throws I2PSessionException {
@ -135,6 +139,10 @@ class I2CPMessageProducer {
/**
* Create a new signed payload and send it off to the destination
*
* @param tag unused - no end-to-end crypto
* @param tags unused - no end-to-end crypto
* @param key unused - no end-to-end crypto
* @param newKey unused - no end-to-end crypto
*/
private Payload createPayload(Destination dest, byte[] payload, SessionTag tag, SessionKey key, Set tags,
SessionKey newKey) throws I2PSessionException {

@ -361,17 +361,23 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
*/
public abstract boolean sendMessage(Destination dest, byte[] payload) throws I2PSessionException;

/**
* @param keyUsed unused - no end-to-end crypto
* @param tagsSent unused - no end-to-end crypto
*/
public abstract boolean sendMessage(Destination dest, byte[] payload, SessionKey keyUsed,
Set tagsSent) throws I2PSessionException;

public abstract void receiveStatus(int msgId, long nonce, int status);

/****** no end-to-end crypto
protected static final Set createNewTags(int num) {
Set tags = new HashSet();
for (int i = 0; i < num; i++)
tags.add(new SessionTag(true));
return tags;
}
*******/

/**
* Recieve a payload message and let the app know its available

@ -135,14 +135,28 @@ class I2PSessionImpl2 extends I2PSessionImpl {
|
||||
return sendMessage(dest, payload, offset, size, null, null, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param keyUsed unused - no end-to-end crypto
|
||||
* @param tagsSent unused - no end-to-end crypto
|
||||
*/
|
||||
@Override
|
||||
public boolean sendMessage(Destination dest, byte[] payload, SessionKey keyUsed, Set tagsSent) throws I2PSessionException {
|
||||
return sendMessage(dest, payload, 0, payload.length, keyUsed, tagsSent, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param keyUsed unused - no end-to-end crypto
|
||||
* @param tagsSent unused - no end-to-end crypto
|
||||
*/
|
||||
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent)
|
||||
throws I2PSessionException {
|
||||
return sendMessage(dest, payload, offset, size, keyUsed, tagsSent, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param keyUsed unused - no end-to-end crypto
|
||||
* @param tagsSent unused - no end-to-end crypto
|
||||
*/
|
||||
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, long expires)
|
||||
throws I2PSessionException {
|
||||
if (_log.shouldLog(Log.DEBUG)) _log.debug("sending message");
|
||||
@ -198,13 +212,17 @@ class I2PSessionImpl2 extends I2PSessionImpl {
|
||||
|
||||
private static final int NUM_TAGS = 50;
|
||||
|
||||
/**
|
||||
* @param keyUsed unused - no end-to-end crypto
|
||||
* @param tagsSent unused - no end-to-end crypto
|
||||
*/
|
||||
protected boolean sendBestEffort(Destination dest, byte payload[], SessionKey keyUsed, Set tagsSent, long expires)
|
||||
throws I2PSessionException {
|
||||
SessionKey key = null;
|
||||
SessionKey newKey = null;
|
||||
SessionTag tag = null;
|
||||
Set sentTags = null;
|
||||
int oldTags = 0;
|
||||
//SessionKey key = null;
|
||||
//SessionKey newKey = null;
|
||||
//SessionTag tag = null;
|
||||
//Set sentTags = null;
|
||||
//int oldTags = 0;
|
||||
long begin = _context.clock().now();
|
||||
/***********
|
||||
if (I2CPMessageProducer.END_TO_END_CRYPTO) {
|
||||
@ -258,27 +276,27 @@ class I2PSessionImpl2 extends I2PSessionImpl {
|
||||
long nonce = _context.random().nextInt(Integer.MAX_VALUE);
|
||||
if (_log.shouldLog(Log.DEBUG)) _log.debug("before sync state");
|
||||
MessageState state = new MessageState(_context, nonce, getPrefix());
|
||||
state.setKey(key);
|
||||
state.setTags(sentTags);
|
||||
state.setNewKey(newKey);
|
||||
//state.setKey(key);
|
||||
//state.setTags(sentTags);
|
||||
//state.setNewKey(newKey);
|
||||
state.setTo(dest);
|
||||
if (_log.shouldLog(Log.DEBUG)) _log.debug(getPrefix() + "Setting key = " + key);
|
||||
//if (_log.shouldLog(Log.DEBUG)) _log.debug(getPrefix() + "Setting key = " + key);
|
||||
|
||||
if (keyUsed != null) {
|
||||
//if (keyUsed != null) {
|
||||
//if (I2CPMessageProducer.END_TO_END_CRYPTO) {
|
||||
// if (newKey != null)
|
||||
// keyUsed.setData(newKey.getData());
|
||||
// else
|
||||
// keyUsed.setData(key.getData());
|
||||
//} else {
|
||||
keyUsed.setData(SessionKey.INVALID_KEY.getData());
|
||||
// keyUsed.setData(SessionKey.INVALID_KEY.getData());
|
||||
//}
|
||||
//}
|
||||
//if (tagsSent != null) {
|
||||
// if (sentTags != null) {
|
||||
// tagsSent.addAll(sentTags);
|
||||
// }
|
||||
//}
|
||||
}
|
||||
if (tagsSent != null) {
|
||||
if (sentTags != null) {
|
||||
tagsSent.addAll(sentTags);
|
||||
}
|
||||
}
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG)) _log.debug("before sync state");
|
||||
long beforeSendingSync = _context.clock().now();
|
||||
@ -293,7 +311,8 @@ class I2PSessionImpl2 extends I2PSessionImpl {
|
||||
+ state.getNonce() + " for best effort "
|
||||
+ " sync took " + (inSendingSync-beforeSendingSync)
|
||||
+ " add took " + (afterSendingSync-inSendingSync));
|
||||
_producer.sendMessage(this, dest, nonce, payload, tag, key, sentTags, newKey, expires);
|
||||
//_producer.sendMessage(this, dest, nonce, payload, tag, key, sentTags, newKey, expires);
|
||||
_producer.sendMessage(this, dest, nonce, payload, null, null, null, null, expires);
|
||||
|
||||
// since this is 'best effort', all we're waiting for is a status update
|
||||
// saying that the router received it - in theory, that should come back
|
||||
|
@ -128,6 +128,10 @@ class I2PSessionMuxedImpl extends I2PSessionImpl2 implements I2PSession {
return sendMessage(dest, payload, 0, payload.length, null, null, 0, proto, fromport, toport);
}

/**
* @param keyUsed unused - no end-to-end crypto
* @param tagsSent unused - no end-to-end crypto
*/
@Override
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size,
SessionKey keyUsed, Set tagsSent, long expires)
@ -135,6 +139,10 @@ class I2PSessionMuxedImpl extends I2PSessionImpl2 implements I2PSession {
return sendMessage(dest, payload, offset, size, keyUsed, tagsSent, 0, PROTO_UNSPECIFIED, PORT_UNSPECIFIED, PORT_UNSPECIFIED);
}

/**
* @param keyUsed unused - no end-to-end crypto
* @param tagsSent unused - no end-to-end crypto
*/
@Override
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent,
int proto, int fromport, int toport) throws I2PSessionException {
@ -142,6 +150,8 @@ class I2PSessionMuxedImpl extends I2PSessionImpl2 implements I2PSession {
}

/**
* @param keyUsed unused - no end-to-end crypto
* @param tagsSent unused - no end-to-end crypto
* @param proto 1-254 or 0 for unset; recommended:
* I2PSession.PROTO_UNSPECIFIED
* I2PSession.PROTO_STREAMING

@ -22,6 +22,8 @@ import net.i2p.util.Log;
* of a message by accepting it, decrypting the payload, adding it to the set of
* recieved messages, and telling the router that it has been recieved correctly.
*
* We don't really decrypt (no more end-to-end crypto)
*
* @author jrandom
*/
class MessagePayloadMessageHandler extends HandlerImpl {
@ -51,21 +53,24 @@ class MessagePayloadMessageHandler extends HandlerImpl {

/**
* Decrypt the payload
*
* We don't really decrypt (no more end-to-end crypto)
* If we do, we need to use the correct key manager in the decrypt() call below
*/
private Payload decryptPayload(MessagePayloadMessage msg, I2PSessionImpl session) throws DataFormatException {
Payload payload = msg.getPayload();
if (!I2CPMessageProducer.END_TO_END_CRYPTO) {
//if (!I2CPMessageProducer.END_TO_END_CRYPTO) {
payload.setUnencryptedData(payload.getEncryptedData());
return payload;
}
//}

byte[] data = _context.elGamalAESEngine().decrypt(payload.getEncryptedData(), session.getDecryptionKey());
if (data == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error decrypting the payload");
throw new DataFormatException("Unable to decrypt the payload");
}
payload.setUnencryptedData(data);
return payload;
//byte[] data = _context.elGamalAESEngine().decrypt(payload.getEncryptedData(), session.getDecryptionKey());
//if (data == null) {
// if (_log.shouldLog(Log.WARN))
// _log.warn("Error decrypting the payload");
// throw new DataFormatException("Unable to decrypt the payload");
//}
//payload.setUnencryptedData(data);
//return payload;
}
}

@ -59,14 +59,18 @@ public class ElGamalAESEngine {
}

/**
* Decrypt the message using the given private key using tags from the given key manager.
* Decrypt the message using the given private key using tags from the default key manager.
*
* @deprecated specify the key manager!
*/
public byte[] decrypt(byte data[], PrivateKey targetPrivateKey) throws DataFormatException {
return decrypt(data, targetPrivateKey, _context.sessionKeyManager());
}

/**
* Decrypt the message using the given private key. This works according to the
* Decrypt the message using the given private key
* and using tags from the specified key manager.
* This works according to the
* ElGamal+AES algorithm in the data structure spec.
*
*/

@ -12,7 +12,7 @@ import org.bouncycastle.crypto.macs.I2PHMac;

/**
* Calculate the HMAC-SHA256 of a key+message. All the good stuff occurs
* in {@link org.bouncycastle.crypto.macs.HMac} and
* in {@link org.bouncycastle.crypto.macs.I2PHMac} and
* {@link org.bouncycastle.crypto.digests.MD5Digest}.
*
*/

@ -15,7 +15,7 @@ import org.bouncycastle.crypto.macs.I2PHMac;

/**
* Calculate the HMAC-MD5 of a key+message. All the good stuff occurs
* in {@link org.bouncycastle.crypto.macs.HMac} and
* in {@link org.bouncycastle.crypto.macs.I2PHMac} and
* {@link org.bouncycastle.crypto.digests.MD5Digest}.
*
*/

@ -9,6 +9,8 @@ package net.i2p.crypto;
*
*/

import java.io.IOException;
import java.io.Writer;
import java.util.Set;

import net.i2p.I2PAppContext;
@ -93,7 +95,8 @@ public class SessionKeyManager {
* method after receiving an ack to a message delivering them)
*
*/
public void tagsDelivered(PublicKey target, SessionKey key, Set<SessionTag> sessionTags) { // nop
public TagSetHandle tagsDelivered(PublicKey target, SessionKey key, Set<SessionTag> sessionTags) { // nop
return null;
}

/**
@ -130,4 +133,8 @@ public class SessionKeyManager {
*/
public void shutdown() { // nop
}

public void renderStatusHTML(Writer out) throws IOException {}
public void failTags(PublicKey target, SessionKey key, TagSetHandle ts) {}
public void tagsAcked(PublicKey target, SessionKey key, TagSetHandle ts) {}
}

core/java/src/net/i2p/crypto/TagSetHandle.java (new file, 8 lines)
@ -0,0 +1,8 @@
package net.i2p.crypto;

/**
* An opaque handle to a TagSet returned by the SessionKeyManager,
* so that OCMOSJ can report that the tags were later acked, or not.
*
*/
public interface TagSetHandle {}
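TagSetHandle closes the loop on outbound tag delivery: tagsDelivered() now returns a handle that the caller (OCMOSJ, per the javadoc) must later pass back to tagsAcked() or failTags(). A hedged sketch of that contract using only the signatures shown in this diff; the surrounding method and variable names are hypothetical:

import java.util.Set;
import net.i2p.crypto.SessionKeyManager;
import net.i2p.crypto.TagSetHandle;
import net.i2p.data.PublicKey;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;

class TagDeliveryExample {
    /** Deliver tags, then report the outcome once the ack (or timeout) is known. */
    static void deliver(SessionKeyManager skm, PublicKey target, SessionKey key,
                        Set<SessionTag> tags, boolean ackedInTime) {
        TagSetHandle handle = skm.tagsDelivered(target, key, tags);
        if (handle == null)
            return; // the base SessionKeyManager is a no-op and returns null
        if (ackedInTime)
            skm.tagsAcked(target, key, handle);  // safe to keep using this TagSet
        else
            skm.failTags(target, key, handle);   // revoke the unacked TagSet
    }
}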
@ -9,14 +9,19 @@ package net.i2p.crypto;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.NoSuchElementException;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
@ -32,6 +37,41 @@ import net.i2p.util.SimpleTimer;
|
||||
* to disk). However, this being java, we cannot guarantee that the keys aren't swapped
|
||||
* out to disk so this should not be considered secure in that sense.
|
||||
*
|
||||
* The outbound and inbound sides are completely independent, each with
|
||||
* their own keys and tags.
|
||||
*
|
||||
* For a new session, outbound tags are not considered delivered until an ack is received.
|
||||
* Otherwise, the loss of the first message would render all subsequent messages
|
||||
* undecryptable. True?
|
||||
*
|
||||
* For an existing session, outbound tags are immediately considered delivered, and are
|
||||
* later revoked if the ack times out. This prevents massive stream slowdown caused by
|
||||
* repeated tag delivery after the minimum tag threshold is reached. Included tags
|
||||
* pushes messages above the ideal 1956 size by ~2KB and causes excessive fragmentation
|
||||
* and padding. As the tags are not seen by the streaming lib, they aren't accounted
|
||||
* for in the window size, and one or more of a series of large messages is likely to be dropped,
|
||||
* either due to high fragmentation or drop priorites at the tunnel OBEP.
|
||||
*
|
||||
* For this to work, the minimum tag threshold and tag delivery quanitity defined in
|
||||
* GarlicMessageBuilder must be chosen with streaming lib windows sizes in mind.
|
||||
* If a single TagSet is not delivered, there will be no stall as long as the
|
||||
* current window size is smaller than the minimum tag threshold.
|
||||
* Additional TagSets will be sent before the acked tags completely run out. See below.
|
||||
* all subsequent messages will fail to decrypt.
|
||||
* See ConnectionOptions in streaming for more information.
|
||||
*
|
||||
* There are large inefficiencies caused by the repeated delivery of tags in a new session.
|
||||
* With an initial streaming window size of 6 and 40 tags per delivery, a web server
|
||||
* would deliver up to 240 tags (7680 bytes, not including bundled leaseset, etc.)
|
||||
* in the first volley of the response.
|
||||
*
|
||||
* Could the two directions be linked somehow, such that the initial request could
|
||||
* contain a key or tags for the response?
|
||||
*
|
||||
* Should the tag threshold and quantity be adaptive?
|
||||
*
|
||||
* Todo: Switch to ConcurrentHashMaps and ReadWriteLocks, only get write lock during cleanup
|
||||
*
|
||||
*/
|
||||
public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
private Log _log;
|
||||
@ -122,6 +162,7 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
}
|
||||
|
||||
/* FIXME Exporting non-public type through public API */
|
||||
/****** leftover from when we had the persistent SKM
|
||||
protected void setData(Set<TagSet> inboundTagSets, Set<OutboundSession> outboundSessions) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Loading " + inboundTagSets.size() + " inbound tag sets, and "
|
||||
@ -148,6 +189,7 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
_outboundSessions.putAll(sessions);
|
||||
}
|
||||
}
|
||||
******/
|
||||
|
||||
/**
|
||||
* Retrieve the session key currently associated with encryption to the target,
|
||||
@ -175,13 +217,10 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
* Associate a new session key with the specified target. Metrics to determine
|
||||
* when to expire that key begin with this call.
|
||||
*
|
||||
* Unused except in tests?
|
||||
*/
|
||||
@Override
|
||||
public void createSession(PublicKey target, SessionKey key) {
|
||||
OutboundSession sess = new OutboundSession(target);
|
||||
sess.setCurrentKey(key);
|
||||
addSession(sess);
|
||||
createAndReturnSession(target, key);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -214,7 +253,7 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
if (sess.getCurrentKey().equals(key)) {
|
||||
SessionTag nxt = sess.consumeNext();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Tag consumed: " + nxt + " with key: " + key.toBase64());
|
||||
_log.debug("OB Tag consumed: " + nxt + " with: " + key);
|
||||
return nxt;
|
||||
}
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
@ -257,23 +296,31 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
|
||||
/**
|
||||
* Take note of the fact that the given sessionTags associated with the key for
|
||||
* encryption to the target have definitely been received at the target (aka call this
|
||||
* method after receiving an ack to a message delivering them)
|
||||
* encryption to the target have been sent. Whether to use the tags immediately
|
||||
* (i.e. assume they will be received) or to wait until an ack, is implementation dependent.
|
||||
*
|
||||
* Here, we wait for the ack if the session is new, otherwise we use right away.
|
||||
* Will this work???
|
||||
* If the tags are pipelined sufficiently, it will.
|
||||
*
|
||||
* @return the TagSetHandle. Caller MUST subsequently call failTags() or tagsAcked()
|
||||
* with this handle.
|
||||
*/
|
||||
@Override
|
||||
public void tagsDelivered(PublicKey target, SessionKey key, Set sessionTags) {
|
||||
public TagSetHandle tagsDelivered(PublicKey target, SessionKey key, Set<SessionTag> sessionTags) {
|
||||
if (_log.shouldLog(Log.DEBUG)) {
|
||||
//_log.debug("Tags delivered to set " + set + " on session " + sess);
|
||||
if (sessionTags.size() > 0)
|
||||
_log.debug("Tags delivered: " + sessionTags.size() + " for key: " + key.toBase64() + ": " + sessionTags);
|
||||
_log.debug("Tags delivered: " + sessionTags.size() + " for key: " + key + ": " + sessionTags);
|
||||
}
|
||||
OutboundSession sess = getSession(target);
|
||||
if (sess == null)
|
||||
sess = createAndReturnSession(target, key);
|
||||
else
|
||||
sess.setCurrentKey(key);
|
||||
TagSet set = new TagSet(sessionTags, key, _context.clock().now());
|
||||
sess.addTags(set);
|
||||
return set;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -281,12 +328,44 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
* has failed to respond when they should have. This call essentially lets the system recover
|
||||
* from corrupted tag sets and crashes
|
||||
*
|
||||
* @deprecated unused and rather drastic
|
||||
*/
|
||||
@Override
|
||||
public void failTags(PublicKey target) {
|
||||
removeSession(target);
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark these tags as invalid, since the peer
|
||||
* has failed to ack them in time.
|
||||
*/
|
||||
@Override
|
||||
public void failTags(PublicKey target, SessionKey key, TagSetHandle ts) {
|
||||
OutboundSession sess = getSession(target);
|
||||
if (sess == null)
|
||||
return;
|
||||
if(!key.equals(sess.getCurrentKey()))
|
||||
return;
|
||||
sess.failTags((TagSet)ts);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("TagSet failed: " + ts);
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark these tags as acked, start to use them (if we haven't already)
|
||||
*/
|
||||
@Override
|
||||
public void tagsAcked(PublicKey target, SessionKey key, TagSetHandle ts) {
|
||||
OutboundSession sess = getSession(target);
|
||||
if (sess == null)
|
||||
return;
|
||||
if(!key.equals(sess.getCurrentKey()))
|
||||
return;
|
||||
sess.ackTags((TagSet)ts);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("TagSet acked: " + ts);
|
||||
}
|
||||
|
||||
/**
|
||||
* Accept the given tags and associate them with the given key for decryption
|
||||
*
|
||||
@ -300,9 +379,9 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
for (Iterator<SessionTag> iter = sessionTags.iterator(); iter.hasNext();) {
|
||||
SessionTag tag = iter.next();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Receiving tag " + tag + " for key " + key.toBase64() + " / " + key.toString() + ": tagSet: " + tagSet);
|
||||
_log.debug("Receiving tag " + tag + " for key " + key + ": tagSet: " + tagSet);
|
||||
synchronized (_inboundTagSets) {
|
||||
old = (TagSet)_inboundTagSets.put(tag, tagSet);
|
||||
old = _inboundTagSets.put(tag, tagSet);
|
||||
overage = _inboundTagSets.size() - MAX_INBOUND_SESSION_TAGS;
|
||||
if (old != null) {
|
||||
if (!old.getAssociatedKey().equals(tagSet.getAssociatedKey())) {
|
||||
@ -330,9 +409,9 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
}
|
||||
|
||||
if (_log.shouldLog(Log.WARN)) {
|
||||
_log.warn("Multiple tags matching! tagSet: " + tagSet + " and old tagSet: " + old + " tag: " + dupTag + "/" + dupTag.toBase64());
|
||||
_log.warn("Earlier tag set creation: " + old + ": key=" + old.getAssociatedKey().toBase64(), old.getCreatedBy());
|
||||
_log.warn("Current tag set creation: " + tagSet + ": key=" + tagSet.getAssociatedKey().toBase64(), tagSet.getCreatedBy());
|
||||
_log.warn("Multiple tags matching! tagSet: " + tagSet + " and old tagSet: " + old + " tag: " + dupTag + "/" + dupTag);
|
||||
_log.warn("Earlier tag set creation: " + old + ": key=" + old.getAssociatedKey());
|
||||
_log.warn("Current tag set creation: " + tagSet + ": key=" + tagSet.getAssociatedKey());
|
||||
}
|
||||
}
|
||||
|
||||
@ -341,7 +420,7 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
|
||||
if ( (sessionTags.size() <= 0) && (_log.shouldLog(Log.DEBUG)) )
|
||||
_log.debug("Received 0 tags for key " + key);
|
||||
if (false) aggressiveExpire();
|
||||
//if (false) aggressiveExpire();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -406,26 +485,26 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
*/
|
||||
@Override
|
||||
public SessionKey consumeTag(SessionTag tag) {
|
||||
if (false) aggressiveExpire();
|
||||
//if (false) aggressiveExpire();
|
||||
synchronized (_inboundTagSets) {
|
||||
TagSet tagSet = (TagSet) _inboundTagSets.remove(tag);
|
||||
if (tagSet == null) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Cannot consume tag " + tag + " as it is not known");
|
||||
_log.debug("Cannot consume IB " + tag + " as it is not known");
|
||||
return null;
|
||||
}
|
||||
tagSet.consume(tag);
|
||||
|
||||
SessionKey key = tagSet.getAssociatedKey();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Consuming tag " + tag.toString() + " for sessionKey " + key.toBase64() + " / " + key.toString() + " on tagSet: " + tagSet);
|
||||
_log.debug("Consuming IB " + tag + " for " + key + " on: " + tagSet);
|
||||
return key;
|
||||
}
|
||||
}
|
||||
|
||||
private OutboundSession getSession(PublicKey target) {
|
||||
synchronized (_outboundSessions) {
|
||||
return (OutboundSession) _outboundSessions.get(target);
|
||||
return _outboundSessions.get(target);
|
||||
}
|
||||
}
|
||||
|
||||
@ -439,7 +518,7 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
if (target == null) return;
|
||||
OutboundSession session = null;
|
||||
synchronized (_outboundSessions) {
|
||||
session = (OutboundSession)_outboundSessions.remove(target);
|
||||
session = _outboundSessions.remove(target);
|
||||
}
|
||||
if ( (session != null) && (_log.shouldLog(Log.WARN)) )
|
||||
_log.warn("Removing session tags with " + session.availableTags() + " available for "
|
||||
@ -457,11 +536,11 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
int remaining = 0;
|
||||
long now = _context.clock().now();
|
||||
StringBuilder buf = null;
|
||||
StringBuilder bufSummary = null;
|
||||
//StringBuilder bufSummary = null;
|
||||
if (_log.shouldLog(Log.DEBUG)) {
|
||||
buf = new StringBuilder(128);
|
||||
buf.append("Expiring inbound: ");
|
||||
bufSummary = new StringBuilder(1024);
|
||||
//bufSummary = new StringBuilder(1024);
|
||||
}
|
||||
synchronized (_inboundTagSets) {
|
||||
for (Iterator<SessionTag> iter = _inboundTagSets.keySet().iterator(); iter.hasNext();) {
|
||||
@ -473,10 +552,10 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
iter.remove();
|
||||
removed++;
|
||||
if (buf != null)
|
||||
buf.append(tag.toString()).append(" @ age ").append(DataHelper.formatDuration(age));
|
||||
} else if (false && (bufSummary != null) ) {
|
||||
bufSummary.append("\nTagSet: " + ts.toString() + ", key: " + ts.getAssociatedKey().toBase64()+"/" + ts.getAssociatedKey().toString()
|
||||
+ ": tag: " + tag.toString());
|
||||
buf.append(tag).append(" @ age ").append(DataHelper.formatDuration(age));
|
||||
//} else if (false && (bufSummary != null) ) {
|
||||
// bufSummary.append("\nTagSet: " + ts + ", key: " + ts.getAssociatedKey()
|
||||
// + ": tag: " + tag);
|
||||
}
|
||||
}
|
||||
remaining = _inboundTagSets.size();
|
||||
@ -484,8 +563,8 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
_context.statManager().addRateData("crypto.sessionTagsRemaining", remaining, 0);
|
||||
if ( (buf != null) && (removed > 0) )
|
||||
_log.debug(buf.toString());
|
||||
if (bufSummary != null)
|
||||
_log.debug("Cleaning up with remaining: " + bufSummary.toString());
|
||||
//if (bufSummary != null)
|
||||
// _log.debug("Cleaning up with remaining: " + bufSummary.toString());
|
||||
|
||||
//_log.warn("Expiring tags: [" + tagsToDrop + "]");
|
||||
|
||||
@ -494,74 +573,111 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
PublicKey key = iter.next();
|
||||
OutboundSession sess = _outboundSessions.get(key);
|
||||
removed += sess.expireTags();
|
||||
if (sess.availableTags() <= 0) {
|
||||
// don't kill a new session or one that's temporarily out of tags
|
||||
if (sess.getLastUsedDate() < now - (SESSION_LIFETIME_MAX_MS / 2) &&
|
||||
sess.availableTags() <= 0) {
|
||||
iter.remove();
|
||||
removed++;
|
||||
removed++; // just to have a non-zero return value?
|
||||
}
|
||||
}
|
||||
}
|
||||
return removed;
|
||||
}
|
||||
|
||||
public String renderStatusHTML() {
|
||||
@Override
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
StringBuilder buf = new StringBuilder(1024);
|
||||
buf.append("<h2>Inbound sessions</h2>");
|
||||
buf.append("<table>");
|
||||
buf.append("<h2>Inbound sessions</h2>" +
|
||||
"<table>");
|
||||
Set<TagSet> inbound = getInboundTagSets();
|
||||
Map<SessionKey, Set<TagSet>> inboundSets = new HashMap(inbound.size());
|
||||
// Build a map of the inbound tag sets, grouped by SessionKey
|
||||
for (Iterator<TagSet> iter = inbound.iterator(); iter.hasNext();) {
|
||||
TagSet ts = iter.next();
|
||||
if (!inboundSets.containsKey(ts.getAssociatedKey())) inboundSets.put(ts.getAssociatedKey(), new HashSet());
|
||||
Set<TagSet> sets = inboundSets.get(ts.getAssociatedKey());
|
||||
sets.add(ts);
|
||||
}
|
||||
int total = 0;
|
||||
long now = _context.clock().now();
|
||||
for (Iterator<SessionKey> iter = inboundSets.keySet().iterator(); iter.hasNext();) {
|
||||
SessionKey skey = iter.next();
|
||||
Set<TagSet> sets = inboundSets.get(skey);
|
||||
buf.append("<tr><td><b>Session key</b>: ").append(skey.toBase64()).append("</td>");
|
||||
buf.append("<td><b># Sets:</b> ").append(sets.size()).append("</td></tr>");
|
||||
buf.append("<tr><td colspan=\"2\"><ul>");
|
||||
Set<TagSet> sets = new TreeSet(new TagSetComparator());
|
||||
sets.addAll(inboundSets.get(skey));
|
||||
buf.append("<tr><td><b>Session key</b>: ").append(skey.toBase64()).append("</td>" +
|
||||
"<td><b># Sets:</b> ").append(sets.size()).append("</td></tr>" +
|
||||
"<tr><td colspan=\"2\"><ul>");
|
||||
for (Iterator<TagSet> siter = sets.iterator(); siter.hasNext();) {
|
||||
TagSet ts = siter.next();
|
||||
buf.append("<li><b>Received on:</b> ").append(new Date(ts.getDate())).append(" with ")
|
||||
.append(ts.getTags().size()).append(" tags remaining</li>");
|
||||
int size = ts.getTags().size();
|
||||
total += size;
|
||||
buf.append("<li><b>Received:</b> ").append(DataHelper.formatDuration(now - ts.getDate())).append(" ago with ");
|
||||
buf.append(size).append(" tags remaining</li>");
|
||||
}
|
||||
buf.append("</ul></td></tr>");
|
||||
buf.append("</ul></td></tr>\n");
|
||||
out.write(buf.toString());
|
||||
buf.setLength(0);
|
||||
}
|
||||
buf.append("</table>");
|
||||
|
||||
buf.append("<h2><b>Outbound sessions</b></h2>");
|
||||
|
||||
buf.append("<table>");
|
||||
buf.append("<tr><th colspan=\"2\">Total tags: ").append(total).append(" (");
|
||||
buf.append(DataHelper.formatSize(32*total)).append("B)</th></tr>\n" +
|
||||
"</table>" +
|
||||
"<h2><b>Outbound sessions</b></h2>" +
|
||||
"<table>");
|
||||
total = 0;
|
||||
Set<OutboundSession> outbound = getOutboundSessions();
|
||||
for (Iterator<OutboundSession> iter = outbound.iterator(); iter.hasNext();) {
|
||||
OutboundSession sess = iter.next();
|
||||
buf.append("<tr><td><b>Target key:</b> ").append(sess.getTarget().toString()).append("<br>");
|
||||
buf.append("<b>Established:</b> ").append(new Date(sess.getEstablishedDate())).append("<br>");
|
||||
buf.append("<b>Last Used:</b> ").append(new Date(sess.getLastUsedDate())).append("<br>");
|
||||
buf.append("<b># Sets:</b> ").append(sess.getTagSets().size()).append("</td></tr>");
|
||||
buf.append("<tr><td><b>Session key:</b> ").append(sess.getCurrentKey().toBase64()).append("</td></tr>");
|
||||
buf.append("<tr><td><ul>");
|
||||
for (Iterator<TagSet> siter = sess.getTagSets().iterator(); siter.hasNext();) {
|
||||
Set<TagSet> sets = new TreeSet(new TagSetComparator());
|
||||
sets.addAll(sess.getTagSets());
|
||||
buf.append("<tr><td><b>Target key:</b> ").append(sess.getTarget().toBase64().substring(0, 64)).append("<br>" +
|
||||
"<b>Established:</b> ").append(DataHelper.formatDuration(now - sess.getEstablishedDate())).append(" ago<br>" +
|
||||
"<b>Last Used:</b> ").append(DataHelper.formatDuration(now - sess.getLastUsedDate())).append(" ago<br>" +
|
||||
"<b>Session key:</b> ").append(sess.getCurrentKey().toBase64()).append("</td>" +
|
||||
"<td><b># Sets:</b> ").append(sess.getTagSets().size()).append("</td></tr>" +
|
||||
"<tr><td colspan=\"2\"><ul>");
|
||||
for (Iterator<TagSet> siter = sets.iterator(); siter.hasNext();) {
|
||||
TagSet ts = siter.next();
|
||||
buf.append("<li><b>Sent on:</b> ").append(new Date(ts.getDate())).append(" with ").append(
|
||||
ts.getTags()
|
||||
.size())
|
||||
.append(" tags remaining</li>");
|
||||
int size = ts.getTags().size();
|
||||
total += size;
|
||||
buf.append("<li><b>Sent:</b> ").append(DataHelper.formatDuration(now - ts.getDate())).append(" ago with ");
|
||||
buf.append(size).append(" tags remaining; acked? ").append(ts.getAcked()).append("</li>");
|
||||
}
|
||||
buf.append("</ul></td></tr>");
|
||||
buf.append("</ul></td></tr>\n");
|
||||
out.write(buf.toString());
|
||||
buf.setLength(0);
|
||||
}
|
||||
buf.append("</table>");
|
||||
buf.append("<tr><th colspan=\"2\">Total tags: ").append(total).append(" (");
|
||||
buf.append(DataHelper.formatSize(32*total)).append("B)</th></tr>\n" +
|
||||
"</table>");
|
||||
|
||||
return buf.toString();
|
||||
out.write(buf.toString());
|
||||
}
|
||||
|
||||
class OutboundSession {
|
||||
/**
|
||||
* Just for the HTML method above so we can see what's going on easier
|
||||
* Earliest first
|
||||
*/
|
||||
private static class TagSetComparator implements Comparator {
|
||||
public int compare(Object l, Object r) {
|
||||
return (int) (((TagSet)l).getDate() - ((TagSet)r).getDate());
|
||||
}
|
||||
}
|
||||
|
||||
private class OutboundSession {
|
||||
private PublicKey _target;
|
||||
private SessionKey _currentKey;
|
||||
private long _established;
|
||||
private long _lastUsed;
|
||||
/** before the first ack, all tagsets go here. These are never expired, we rely
|
||||
on the callers to call failTags() or ackTags() to remove them from this list. */
|
||||
private /* FIXME final FIXME */ List<TagSet> _unackedTagSets;
|
||||
/**
|
||||
* As tagsets are acked, they go here.
|
||||
* After the first ack, new tagsets go here (i.e. presumed acked)
|
||||
*/
|
||||
private /* FIXME final FIXME */ List<TagSet> _tagSets;
|
||||
/** set to true after first tagset is acked */
|
||||
private boolean _acked;
|
||||
|
||||
public OutboundSession(PublicKey target) {
|
||||
this(target, null, _context.clock().now(), _context.clock().now(), new ArrayList());
|
||||
@ -572,13 +688,44 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
||||
_currentKey = curKey;
|
||||
_established = established;
|
||||
_lastUsed = lastUsed;
|
||||
_tagSets = tagSets;
|
||||
_unackedTagSets = tagSets;
|
||||
_tagSets = new ArrayList();
|
||||
}
|
||||
|
||||
/** list of TagSet objects */
|
||||
/**
|
||||
* @return list of TagSet objects
|
||||
* This is used only by renderStatusHTML().
|
||||
* It includes both acked and unacked TagSets.
|
||||
*/
|
||||
List<TagSet> getTagSets() {
|
||||
List<TagSet> rv;
|
||||
synchronized (_tagSets) {
|
||||
return new ArrayList(_tagSets);
|
||||
rv = new ArrayList(_unackedTagSets);
|
||||
rv.addAll(_tagSets);
|
||||
}
|
||||
return rv;
|
||||
}
|
||||
|
||||
/**
|
||||
* got an ack for these tags
|
||||
* For tagsets delivered after the session was acked, this is a nop
|
||||
* because the tagset was originally placed directly on the acked list.
|
||||
*/
|
||||
void ackTags(TagSet set) {
|
||||
synchronized (_tagSets) {
|
||||
if (_unackedTagSets.remove(set)) {
|
||||
_tagSets.add(set);
|
||||
_acked = true;
|
||||
}
|
||||
}
|
||||
set.setAcked();
|
||||
}
|
||||
|
||||
/** didn't get an ack for these tags */
|
||||
void failTags(TagSet set) {
|
||||
synchronized (_tagSets) {
|
||||
_unackedTagSets.remove(set);
|
||||
_tagSets.remove(set);
|
||||
}
|
||||
}
|
||||
|
||||
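The unacked/acked split above is what the public SessionKeyManager entry points drive: per the changelog in this commit, tagsDelivered() now only records that a tagset was sent, and the caller later reports the outcome. A rough lifecycle sketch using only the OutboundSession methods shown above (an illustrative sequence, not code from this commit; sess and ts are assumed locals):

// TagSet ts = new TagSet(tags, sessKey, _context.clock().now());
// sess.addTags(ts);     // session not yet acked: parked on _unackedTagSets, never expired
// sess.ackTags(ts);     // delivery confirmed: moved to _tagSets, _acked = true, ts.setAcked()
// sess.failTags(ts);    // or, on timeout: removed from both lists and forgotten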
@ -626,7 +773,7 @@ public class TransientSessionKeyManager extends SessionKeyManager {
int removed = 0;
synchronized (_tagSets) {
for (int i = 0; i < _tagSets.size(); i++) {
TagSet set = (TagSet) _tagSets.get(i);
TagSet set = _tagSets.get(i);
if (set.getDate() + SESSION_TAG_DURATION_MS <= now) {
_tagSets.remove(i);
i--;
@ -642,7 +789,7 @@ public class TransientSessionKeyManager extends SessionKeyManager {
_lastUsed = now;
synchronized (_tagSets) {
while (_tagSets.size() > 0) {
TagSet set = (TagSet) _tagSets.get(0);
TagSet set = _tagSets.get(0);
if (set.getDate() + SESSION_TAG_DURATION_MS > now) {
SessionTag tag = set.consumeNext();
if (tag != null) return tag;
@ -656,14 +803,21 @@ public class TransientSessionKeyManager extends SessionKeyManager {
return null;
}

/** @return the total number of tags in acked TagSets */
public int availableTags() {
int tags = 0;
long now = _context.clock().now();
synchronized (_tagSets) {
for (int i = 0; i < _tagSets.size(); i++) {
TagSet set = (TagSet) _tagSets.get(i);
if (set.getDate() + SESSION_TAG_DURATION_MS > now)
tags += set.getTags().size();
TagSet set = _tagSets.get(i);
if (set.getDate() + SESSION_TAG_DURATION_MS > now) {
int sz = set.getTags().size();
// so tags are sent when the acked tags are below
// 30, 17, and 4.
if (!set.getAcked())
sz /= 3;
tags += sz;
}
}
}
return tags;
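The "30, 17, and 4" comment becomes clearer with the numbers used elsewhere in this change set: GarlicMessageBuilder now uses LOW_THRESHOLD = 30 and DEFAULT_TAGS = 40 (see its diff further down), and a tagset that has been sent but not yet individually acked only counts for a third of its tags here. A rough illustration of that arithmetic, as a sketch only (the estimate() helper below is hypothetical, not part of the codebase):

// Why new tags go out when the acked supply drops below roughly 30, 17, then 4:
// each speculative (sent-but-unacked) set of DEFAULT_TAGS counts as 40 / 3 = 13 tags.
static int estimate(int ackedTags, int unackedSetsInFlight) {
    return ackedTags + unackedSetsInFlight * (40 / 3);
}
// 0 sets in flight: estimate drops under 30 when ackedTags < 30 -> bundle a new set
// 1 set in flight:  estimate drops under 30 when ackedTags < 17 -> bundle another
// 2 sets in flight: estimate drops under 30 when ackedTags < 4  -> bundle a third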
@ -689,19 +843,31 @@ public class TransientSessionKeyManager extends SessionKeyManager {
return -1;
}

/**
* If the session has never been acked, put the TagSet on the unacked list.
* Otherwise, consider it good right away.
*/
public void addTags(TagSet set) {
_lastUsed = _context.clock().now();
if (_acked) {
synchronized (_tagSets) {
_tagSets.add(set);
}
} else {
synchronized (_unackedTagSets) {
_unackedTagSets.add(set);
}
}
}
}

static class TagSet {
private static class TagSet implements TagSetHandle {
private Set<SessionTag> _sessionTags;
private SessionKey _key;
private long _date;
private Exception _createdBy;
//private Exception _createdBy;
/** did we get an ack for this tagset? */
private boolean _acked;

public TagSet(Set<SessionTag> tags, SessionKey key, long date) {
if (key == null) throw new IllegalArgumentException("Missing key");
@ -709,12 +875,12 @@ public class TransientSessionKeyManager extends SessionKeyManager {
_sessionTags = tags;
_key = key;
_date = date;
if (true) {
long now = I2PAppContext.getGlobalContext().clock().now();
_createdBy = new Exception("Created by: key=" + _key.toBase64() + " on "
+ new Date(now) + "/" + now
+ " via " + Thread.currentThread().getName());
}
//if (true) {
// long now = I2PAppContext.getGlobalContext().clock().now();
// _createdBy = new Exception("Created by: key=" + _key.toBase64() + " on "
// + new Date(now) + "/" + now
// + " via " + Thread.currentThread().getName());
//}
}

/** when the tag set was created */
@ -740,27 +906,31 @@ public class TransientSessionKeyManager extends SessionKeyManager {
}

public void consume(SessionTag tag) {
if (contains(tag)) {
_sessionTags.remove(tag);
}
}

/** let's do this without counting the elements first */
public SessionTag consumeNext() {
if (_sessionTags.size() <= 0) {
SessionTag first;
try {
first = _sessionTags.iterator().next();
} catch (NoSuchElementException nsee) {
return null;
}

SessionTag first = (SessionTag) _sessionTags.iterator().next();
_sessionTags.remove(first);
return first;
}

public Exception getCreatedBy() { return _createdBy; }
//public Exception getCreatedBy() { return _createdBy; }

public void setAcked() { _acked = true; }
public boolean getAcked() { return _acked; }

/****** this will return a dup if two in the same ms, so just use java
@Override
public int hashCode() {
long rv = 0;
if (_key != null) rv = rv * 7 + _key.hashCode();
if (_key != null) rv = _key.hashCode();
rv = rv * 7 + _date;
// no need to hashCode the tags, key + date should be enough
return (int) rv;
@ -770,9 +940,20 @@ public class TransientSessionKeyManager extends SessionKeyManager {
public boolean equals(Object o) {
if ((o == null) || !(o instanceof TagSet)) return false;
TagSet ts = (TagSet) o;
return DataHelper.eq(ts.getAssociatedKey(), getAssociatedKey())
return DataHelper.eq(ts.getAssociatedKey(), _key)
//&& DataHelper.eq(ts.getTags(), getTags())
&& ts.getDate() == getDate();
&& ts.getDate() == _date;
}
******/

@Override
public String toString() {
StringBuilder buf = new StringBuilder(256);
buf.append("TagSet established: ").append(new Date(_date));
buf.append(" Session key: ").append(_key.toBase64());
buf.append(" Size: ").append(_sessionTags.size());
buf.append(" Acked? ").append(_acked);
return buf.toString();
}
}
}
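For orientation, the sending side now drives this lifecycle through the per-client manager instead of the router-wide one. A sketch of the call pattern, pieced together from the OutboundClientMessageOneShotJob changes near the end of this diff (names as they appear there; the timeout-side failTags() call is inferred by analogy with tagsAcked() and the changelog entry, and the surrounding wiring is elided):

// Look up the client's own SessionKeyManager by destination hash.
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_from.calculateHash());
TagSetHandle tsh = null;
if (skm != null && sessKey != null && !tags.isEmpty())
    tsh = skm.tagsDelivered(_leaseSet.getEncryptionKey(), sessKey, tags);   // sent, not yet acked

// When the delivery-status reply arrives (SendSuccessJob):
if (skm != null && tsh != null)
    skm.tagsAcked(_leaseSet.getEncryptionKey(), sessKey, tsh);              // confirm the set

// When the send times out (SendTimeoutJob), discard the speculative set instead:
if (skm != null && tsh != null)
    skm.failTags(_leaseSet.getEncryptionKey(), sessKey, tsh);               // drop it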
@ -89,6 +89,8 @@ public class SessionKey extends DataStructureImpl {

@Override
public String toString() {
return "SessionKey " + toBase64();
/****
if (true) return super.toString();
StringBuilder buf = new StringBuilder(64);
buf.append("[SessionKey: ");
@ -103,5 +105,6 @@ public class SessionKey extends DataStructureImpl {
}
buf.append("]");
return buf.toString();
****/
}
}

@ -58,4 +58,8 @@ public class SessionTag extends ByteArray {
out.write(getData());
}

@Override
public String toString() {
return "SessionTag " + toBase64();
}
}
history.txt
@ -1,3 +1,103 @@
2009-09-21 sponge
* fixups to SlackBuilds. requiredbuilder does the wrong thing, and
thinks that java is perl! This isn't really a big deal,
the file format is simple enough and the requirements are known.

2009-09-07 mkvore
* removes a SAM v1&2 bug

2009-09-04 zzz
* SessionKeyManager, OCMOSJ, Garlic:
- Enable per-client SessionKeyManagers for better anonymity
- tagsDelivered() now means tags are sent, not acked.
- OCMOSJ uses the new TagSetHandle object returned from tagsDelivered()
to call tagsAcked() or failTags() as appropriate.
- Assume tags delivered on an established session to
reduce streaming lib stalls caused by massive tag deliveries;
should increase throughput and window sizes on long-lived streams
- Unacked tagsets on a new session are stored on a separate list
- Don't kill an OB Session just because it's temporarily out of tags
- Increase min tag threshold to 30 (was 20) due to new speculative
tags delivered scheme, and to increase effective max window
- More Java 5 and dead code cleanups, and more comments and javadoc,
debug logging cleanups
- Key toString()s for easier debugging
- HandleGarlicMessageJob: cleanup of unused things
* Tunnel TestJob:
- Consume the tag after a failed test so it doesn't
stay in the SKM
- Disable tests with router.disableTunnelTesting=true
* configkeyring.jsp: Add delete and cancel buttons
* Logging: Fix directory for rotated log
* TunnelDispatcher: Cleanup

2009-09-02 sponge
* Small logic fix for dr|z3d

2009-08-28 zzz
* Client: Fail if no date handshake after 30s or no leaseset
after 5m, rather than hanging forever.
* Console:
- Prevent OOMs in NewsFetcher or StatsSummarizer from
killing the router
- Fix favicon (-17)
* Data: Speed up many hashcodes
* DataHelper: Fix byte array hashcode for small arrays
* DecayingBloomFilter:
- Replace with new DecayingHashSet for 3 of 4 uses,
and also in the 4th if the router is low-bandwidth.
Saves 8 MB heap.
* EepGet, I2PSnark:
- New I2PSocketEepGet fetches through existing tunnels
rather than through the proxy
- Use new eepget for i2psnark
- Add a fake user agent for non-proxied fetches
- Cleanups
* NetDb:
- oops, store leaseset locally even when shutting down
(fix -16)
- Java 5 cleanups
* PRNG:
- Rename config option to prng.buffers (was router.prng.buffers)
- Change the default from 16 to 2 for I2PAppContext (saves 3.5MB)
* Tunnel:
- Adjust the random drop probability for the message size
- Concurrentify HashSetIVValidator
* TunnelPool:
- Don't test tunnels when shutting down
- Less rates
- Java 5 cleanups

2009-08-24 zzz
* ClientManager:
- Prevent client destination theft by rejecting duplicates
- Java 5 cleanups
* Console:
- Put favicon on every page
- Make every page UTF-8, ☃ safe for snowmen
- Remove options boxes on configtunnels.jsp
- Fix UTF-8 form submission (i2ptunnel too)
- Throw 403 instead of 404 from flags.jsp and viewstat.jsp
so we don't render error.jsp
* I2CP: Fix the SessionConfig serializer in DataHelper,
so that UTF-8 tunnel names are not corrupted by
I2CP and can be displayed on the console
* Message: Move 2 unused classes out of the router lib (~15KB)
(more SKM prep)
* Message, I2PSession, SessionKeyManager, Console:
Prep for SessionKeyManager work in the router -
Fix up SKM renderStatusHTML(); add debug.jsp to see it;
Redefine getClientSessionKeyManager();
More cleanups
* Ministreaming: Kill deprecation warnings
* profiles.jsp: Bulletproofing, less memory usage
* Streaming, I2PSession:
Prep for SessionKeyManager work in the router -
Comment out, deprecate, and javadoc for unused keys and tags,
they are vestiges of end-to-end crypto
* Updates: Verify zip at startup before extracting
* Wrapper: Take a couple fields out of the log so it's narrower

2009-08-20 zzz
* Config files:
- Add some path and encoding help
@ -103,6 +103,28 @@ div.warning hr {
|
||||
margin: 5px 0;
|
||||
}
|
||||
|
||||
/* console error messages */
|
||||
|
||||
div.sorry {
|
||||
padding: 20px;
|
||||
background: #ddf;
|
||||
margin: -2px 1px 0 195px;
|
||||
border: 5px solid #bbf;
|
||||
text-align: justify;
|
||||
-moz-box-shadow: inset 0px 0px 0px 1px #d00;
|
||||
word-wrap: break-word;
|
||||
font-weight: bold;
|
||||
color: #001;
|
||||
}
|
||||
|
||||
div.sorry hr {
|
||||
color: #001;
|
||||
background: #001;
|
||||
height: 1px;
|
||||
border: 1px solid #001;
|
||||
margin: 10px 0;
|
||||
}
|
||||
|
||||
div.toolbar {
|
||||
margin: 0em 0em 2em 0em;
|
||||
font-weight: bold;
|
||||
@ -123,7 +145,7 @@ div.routersummary {
|
||||
width: 185px;
|
||||
color: inherit;
|
||||
margin: 0;
|
||||
padding: 7px 1px;
|
||||
padding: 10px 1px 7px 1px;
|
||||
text-align: center !important;
|
||||
border: 5px solid #bbf;
|
||||
font-size: 9pt;
|
||||
|
BIN installer/resources/themes/console/classic/images/i2plogo.png (new binary file, not shown; 6.4 KiB)
@ -223,6 +223,34 @@ div.warning {
|
||||
word-wrap: break-word;
|
||||
}
|
||||
|
||||
/* console error messages */
|
||||
|
||||
div.sorry {
|
||||
margin: 5px 15px 10px 220px;
|
||||
padding: 20px 20px 20px 75px;
|
||||
background: #005;
|
||||
border: 1px solid #99f;
|
||||
-moz-border-radius: 4px;
|
||||
-khtml-border-radius: 4px;
|
||||
border-radius: 4px;
|
||||
text-align: justify;
|
||||
background-image:url("images/errortriangle.png");
|
||||
background-position:15px center;
|
||||
background-repeat:no-repeat;
|
||||
-moz-box-shadow: inset 0px 0px 0px 1px #d00;
|
||||
word-wrap: break-word;
|
||||
font-weight: bold;
|
||||
color: #eef;
|
||||
}
|
||||
|
||||
div.sorry hr {
|
||||
color: #eef;
|
||||
background: #eef;
|
||||
height: 1px;
|
||||
border: 1px solid #eef;
|
||||
margin: 10px 0;
|
||||
}
|
||||
|
||||
div.main {
|
||||
margin: 0px 0px 20px 195px;
|
||||
padding: 0 15px 15px 25px;
|
||||
|
BIN installer/resources/themes/console/dark/i2plogo.png (new binary file, not shown; 11 KiB)
Binary file changed (not shown): 6.4 KiB before, 12 KiB after
@ -1,7 +1,7 @@
|
||||
/* Not yet complete. Subject to flux and change. dr|z3d - 07.25.09 */
|
||||
|
||||
body {
|
||||
margin: 25px 10px 0 5px;
|
||||
margin: 15px 0 0 10px;
|
||||
padding: 0em;
|
||||
text-align: center;
|
||||
background: #eef;
|
||||
@ -23,6 +23,7 @@ pre {
|
||||
text-align: left;
|
||||
font: 8pt "Lucida Console", "DejaVu Sans Mono", Courier, mono;
|
||||
color: #333;
|
||||
margin: 10px;
|
||||
}
|
||||
|
||||
div.logo {
|
||||
@ -78,7 +79,7 @@ a:active{
|
||||
div.routersummaryouter {
|
||||
float: left;
|
||||
width: 215px;
|
||||
margin: 0 0 10px 20px;
|
||||
margin: 0 0 10px 0px;
|
||||
padding: 0;
|
||||
border: 0;
|
||||
clear: left;/* fixes a bug in Opera */
|
||||
@ -183,7 +184,6 @@ div.routersummary td {
|
||||
border: 0 !important;
|
||||
}
|
||||
|
||||
|
||||
div.routersummary tr:nth-child(even) {
|
||||
background-color: #f60;
|
||||
background-image: none !important;
|
||||
@ -194,6 +194,8 @@ div.routersummarytr:nth-child(odd) {
|
||||
background-image: none !important;
|
||||
}
|
||||
|
||||
/* proxy error messages */
|
||||
|
||||
div.warning {
|
||||
margin: 5px 20px 10px 240px;
|
||||
padding: 0px 25px 20px 75px;
|
||||
@ -212,8 +214,36 @@ div.warning {
|
||||
word-wrap: break-word;
|
||||
}
|
||||
|
||||
/* console error messages */
|
||||
|
||||
div.sorry {
|
||||
margin: 5px 15px 10px 220px;
|
||||
padding: 20px 20px 20px 75px;
|
||||
background: #ffb;
|
||||
border: 1px solid #002;
|
||||
-moz-border-radius: 4px;
|
||||
-khtml-border-radius: 4px;
|
||||
border-radius: 4px;
|
||||
text-align: justify;
|
||||
background-image: url("images/errortriangle.png");
|
||||
background-position: 15px center;
|
||||
background-repeat: no-repeat;
|
||||
-moz-box-shadow: inset 0px 0px 0px 1px #d00;
|
||||
word-wrap: break-word;
|
||||
font-weight: bold;
|
||||
color: #331;
|
||||
}
|
||||
|
||||
div.sorry hr {
|
||||
color: #552;
|
||||
background: #552;
|
||||
height: 1px;
|
||||
border: 1px solid #552;
|
||||
margin: 10px 0;
|
||||
}
|
||||
|
||||
div.main {
|
||||
margin: 0px 0px 20px 220px;
|
||||
margin: 0px 0px 20px 195px;
|
||||
padding: 0 15px 15px 25px;
|
||||
background: #eef;
|
||||
text-align: left;
|
||||
@ -237,7 +267,7 @@ div.main textarea {
|
||||
}
|
||||
|
||||
div.news {
|
||||
margin: 0px 15px 20px 245px;
|
||||
margin: 0px 15px 10px 220px;
|
||||
padding: 20px 30px 20px 30px;
|
||||
border: 1px solid #003;
|
||||
color: #410;
|
||||
@ -302,7 +332,7 @@ div.news h4 {
|
||||
|
||||
div.confignav {
|
||||
padding: 15px 10px !important;
|
||||
margin: 0 0 25px 0;
|
||||
margin: 0 0px 15px 0;
|
||||
background: #ddf url('images/lightbluetile.png');
|
||||
-moz-border-radius: 4px;
|
||||
-khtml-border-radius: 4px;
|
||||
@ -316,8 +346,8 @@ div.confignav {
|
||||
}
|
||||
|
||||
div.configure {
|
||||
padding: 0 15px 15px 15px !important;
|
||||
margin: 10px 0px 25px 0;
|
||||
padding: 0 15px 0px 15px !important;
|
||||
margin: 0px 0px 15px 0;
|
||||
background: #ddf url('images/lightbluetile.png');
|
||||
-moz-border-radius: 4px;
|
||||
-khtml-border-radius: 4px;
|
||||
@ -327,6 +357,17 @@ div.configure {
|
||||
min-width: 400px;
|
||||
}
|
||||
|
||||
div.configure h3, div.graphspanel h3 {
|
||||
border: 1px solid #002;
|
||||
border-left: 5px solid #002;
|
||||
padding: 3px 5px 3px 5px;
|
||||
margin: 15px 0 15px 0;
|
||||
border-radius: 0 4px 4px 0;
|
||||
-moz-border-radius: 0 4px 4px 0;
|
||||
-khtml-border-radius: 0 4px 4px 0;
|
||||
background: #eef;
|
||||
}
|
||||
|
||||
div.graphspanel {
|
||||
padding: 12px;
|
||||
margin: 10px 0px 25px 0;
|
||||
@ -365,7 +406,7 @@ div.graphspanel form {
|
||||
|
||||
div.messages {
|
||||
padding: 10px;
|
||||
margin: 10px 0 20px 0;
|
||||
margin: 10px 0 15px 0;
|
||||
background: #ddf;
|
||||
-moz-border-radius: 4px;
|
||||
-khtml-border-radius: 4px;
|
||||
@ -397,7 +438,7 @@ table {
|
||||
border-collapse: collapse;
|
||||
width: 100%;
|
||||
border: 1px solid #000022;
|
||||
margin: 5px 0px 5px 0px;
|
||||
margin: 10px -15px 5px 0px;
|
||||
cell-padding: 1px;
|
||||
font-size: 7pt;
|
||||
background: #b4c8ff url('images/tabletitlelight.png') repeat-x;
|
||||
@ -462,7 +503,7 @@ div.main li {
|
||||
text-align: left;
|
||||
list-style: square;
|
||||
margin: 2px 5px 0px 20px;
|
||||
padding: 1px 20px 1px 10px;
|
||||
padding: 1px 10px 1px 10px;
|
||||
line-height: 150%;
|
||||
word-wrap: break-word;
|
||||
}
|
||||
@ -528,7 +569,7 @@ h1 {
|
||||
text-align: left;
|
||||
color: #002;
|
||||
padding: 10px 15px;
|
||||
margin: 0 15px 25px 245px;
|
||||
margin: 0 15px 15px 220px;
|
||||
font: normal bold 16pt/120% "Lucida Sans Unicode", "Bitstream Vera Sans", Verdana, Tahoma, Helvetica, sans-serif;
|
||||
letter-spacing: 0.15em;
|
||||
text-transform: uppercase;
|
||||
@ -554,7 +595,7 @@ h2 {
|
||||
border-radius: 4px;
|
||||
-moz-border-radius: 4px;
|
||||
-khtml-border-radius: 4px;
|
||||
margin: 25px 0 20px 0 !important;
|
||||
margin: 15px 0px 10px 0 !important;
|
||||
-moz-box-shadow: inset 0px 0px 1px 0px #002;
|
||||
word-wrap: break-word;
|
||||
}
|
||||
@ -571,7 +612,7 @@ h3 {
|
||||
border: 1px solid #002;
|
||||
border-left: 5px solid #002;
|
||||
padding: 3px 5px 3px 5px;
|
||||
margin: 20px 0 15px 0;
|
||||
margin: 10px 0 15px 0;
|
||||
border-radius: 0 4px 4px 0;
|
||||
-moz-border-radius: 0 4px 4px 0;
|
||||
-khtml-border-radius: 0 4px 4px 0;
|
||||
@ -764,8 +805,8 @@ form {}
|
||||
}
|
||||
|
||||
.joblog {
|
||||
margin: 25px 0 25px 0;
|
||||
padding: 20px 40px 20px 40px !important;
|
||||
margin: 15px 0;
|
||||
padding: 10px 20px !important;
|
||||
border: 1px solid #003;
|
||||
background-color: #004;
|
||||
background: #ddf url('images/lightbluetile.png');
|
||||
@ -786,6 +827,10 @@ form {}
|
||||
word-wrap: break-word !important;
|
||||
}
|
||||
|
||||
.joblog table {
|
||||
margin-top: 10px;
|
||||
}
|
||||
|
||||
.smallhead {
|
||||
font-size: 7pt
|
||||
}
|
||||
|
Binary file changed (not shown): 6.4 KiB before, 12 KiB after
@ -1,10 +1,9 @@
|
||||
/* Not yet complete. Subject to flux and change. dr|z3d - 07.25.09 */
|
||||
|
||||
body {
|
||||
background-color: #eef;
|
||||
background: #eef;
|
||||
color: #001;
|
||||
font-family:"Lucida Sans Unicode", "Bitstream Vera Sans", Verdana, Tahoma, Helvetica, sans-serif;
|
||||
font-size: 8pt;
|
||||
font: 8pt "Lucida Sans Unicode","Bitstream Vera Sans",Verdana,Tahoma,Helvetica,sans-serif;
|
||||
}
|
||||
|
||||
.snarkTitle {
|
||||
@ -51,10 +50,7 @@ body {
|
||||
}
|
||||
|
||||
.snarkMessages {
|
||||
background-color: #f83;
|
||||
font-family: "Lucida Console", "DejaVu Sans Mono", Courier, mono !important;
|
||||
font-size: 9pt;
|
||||
font-weight: bold;
|
||||
font: bold 9pt "Lucida Console","DejaVu Sans Mono",Courier,mono !important;
|
||||
text-align: left;
|
||||
margin: 0 0px 10px 0px;
|
||||
padding: 0;
|
||||
@ -63,18 +59,16 @@ body {
|
||||
-khtml-border-radius: 4px;
|
||||
border-radius: 4px;
|
||||
border: 2px solid #930;
|
||||
text-align: left;
|
||||
overflow: auto;
|
||||
background: #f40 url('../console/images/orangetile.png');
|
||||
color: #531;
|
||||
height: 64px;
|
||||
width: auto;
|
||||
background: #f83 url('../console/images/orangetile.png');
|
||||
}
|
||||
|
||||
pre {
|
||||
font-family: "Lucida Console", "DejaVu Sans Mono", Courier, mono !important;
|
||||
width: 100%;
|
||||
font-size: 8pt;
|
||||
font: 8pt "Lucida Console","DejaVu Sans Mono",Courier,mono !important;
|
||||
padding: 0;
|
||||
text-align: left !important;
|
||||
height: 8px;
|
||||
@ -84,62 +78,66 @@ table {
|
||||
margin: 0px 0px 10px 0px;
|
||||
border: 0px;
|
||||
padding: 0px;
|
||||
border-width: 0px;
|
||||
border-spacing: 0px;
|
||||
border-collapse: collapse;
|
||||
}
|
||||
|
||||
th {
|
||||
padding: 5px;
|
||||
padding: 4px;
|
||||
font-size: 8pt;
|
||||
border-top: 1px outset #001;
|
||||
border-bottom: 1px inset #001;
|
||||
background: #f60 url('/themes/console/images/tabletitleorange.png') repeat-x;
|
||||
/* text-align: right; */
|
||||
whitespace: nowrap;
|
||||
}
|
||||
|
||||
.SnarkTorrents {
|
||||
margin: 0;
|
||||
border: 1px solid #001;
|
||||
background-color: #f9f;
|
||||
background: #f9f;
|
||||
}
|
||||
|
||||
td {
|
||||
padding: 5px;
|
||||
/* text-align: right;*/
|
||||
padding: 4px;
|
||||
}
|
||||
|
||||
.snarkTorrentEven {
|
||||
background-color: #fb1;
|
||||
background: #fb1;
|
||||
font-size: 7pt;
|
||||
}
|
||||
|
||||
.snarkTorrentOdd {
|
||||
background-color: #fa1;
|
||||
background: #fa1;
|
||||
font-size: 7pt;
|
||||
}
|
||||
|
||||
.snarkNewTorrent {
|
||||
font-size: 9pt;
|
||||
}
|
||||
|
||||
.snarkAddInfo {
|
||||
font-size: 9pt;
|
||||
line-height: 130% !important;
|
||||
}
|
||||
|
||||
.snarkConfigTitle {
|
||||
font-size: 11pt;
|
||||
font-weight: bold;
|
||||
text-decoration: underline;
|
||||
text-transform: uppercase;
|
||||
text-shadow: 0px 0px 2px rgba(172,172,192,0.9);
|
||||
}
|
||||
|
||||
.snarkConfig {
|
||||
font-size: 10pt;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.page {
|
||||
background-color: #fff;
|
||||
background: #fff;
|
||||
color: #310;
|
||||
min-width: 800px !important;
|
||||
/* max-width: 800px !important; */
|
||||
margin: 5px 0px;
|
||||
margin: 5px 0 0 0;
|
||||
padding: 10px 10px 0px 10px;
|
||||
-moz-border-radius: 4px;
|
||||
-khtml-border-radius: 4px;
|
||||
@ -149,20 +147,15 @@ td {
|
||||
line-height: 160% !important;
|
||||
-moz-box-shadow: inset 0px 0px 1px 0px #002;
|
||||
text-align: center;
|
||||
background: #ddf url('../console/light/images/lightbluetile.png');
|
||||
opacity: 1.0;
|
||||
}
|
||||
|
||||
form {
|
||||
line-height: 250%
|
||||
line-height: 250%;
|
||||
}
|
||||
|
||||
p {
|
||||
line-height: 150%
|
||||
}
|
||||
|
||||
a:link {
|
||||
padding 5px;
|
||||
line-height: 150%;
|
||||
}
|
||||
|
||||
hr {
|
||||
@ -194,7 +187,6 @@ a:hover{
|
||||
}
|
||||
|
||||
input {
|
||||
/* font-family: "Lucida Console", "DejaVu Sans Mono", Courier, mono !important;*/
|
||||
font-size: 9pt;
|
||||
font-weight: bold;
|
||||
text-align: left;
|
||||
@ -202,50 +194,29 @@ input {
|
||||
}
|
||||
|
||||
select {
|
||||
font-family:"Lucida Sans Unicode", "Bitstream Vera Sans", Verdana, Tahoma, Helvetica, sans-serif;
|
||||
background-color: #ffe;
|
||||
background: #ffe;
|
||||
color: #310;
|
||||
font-size: 9pt;
|
||||
font: 9pt "Lucida Sans Unicode","Bitstream Vera Sans",Verdana,Tahoma,Helvetica,sans-serif;
|
||||
}
|
||||
|
||||
img {
|
||||
border: none;
|
||||
margin: 5px 5px 0px 5px;
|
||||
opacity: 1.0;
|
||||
line-height: 100%
|
||||
line-height: 100%;
|
||||
}
|
||||
|
||||
img: hover {
|
||||
border: none;
|
||||
margin: 5px 5px 0px 5px;
|
||||
opacity: 0.5;
|
||||
line-height: 100%
|
||||
line-height: 100%;
|
||||
}
|
||||
|
||||
|
||||
div.section {
|
||||
div.section,div.mainsection {
|
||||
margin: 0 0 10px 0;
|
||||
padding: 10px;
|
||||
background: #ffe;
|
||||
border: 1px solid #001;
|
||||
text-align: center;
|
||||
color: #001;
|
||||
-moz-border-radius: 4px;
|
||||
-khtml-border-radius: 4px;
|
||||
border-radius: 4px;
|
||||
-moz-box-shadow: inset 0px 0px 1px 0px #002;
|
||||
word-wrap: break-word;
|
||||
text-align: center;
|
||||
background: #ffe url('../console/light/images/tabletile.png');
|
||||
opacity: 1.0;
|
||||
}
|
||||
|
||||
div.mainsection {
|
||||
margin: 0 0 10px 0;
|
||||
padding: 10px;
|
||||
background: #ffe;
|
||||
border: 1px solid #001;
|
||||
text-align: center;
|
||||
color: #001;
|
||||
-moz-border-radius: 4px;
|
||||
-khtml-border-radius: 4px;
|
||||
@ -259,8 +230,7 @@ div.mainsection {
|
||||
|
||||
div.newtorrentsection {
|
||||
margin: 0 0 10px 0;
|
||||
padding: 10px;
|
||||
background: #ffe;
|
||||
padding: 0 10px 10px 10px;
|
||||
border: 1px solid #001;
|
||||
text-align: center;
|
||||
color: #001;
|
||||
@ -269,15 +239,13 @@ div.newtorrentsection {
|
||||
border-radius: 4px;
|
||||
-moz-box-shadow: inset 0px 0px 1px 0px #002;
|
||||
word-wrap: break-word;
|
||||
text-align: center;
|
||||
background: #ffe url('../console/images/yellowtile.png');
|
||||
background: #bb4 url('../console/images/yellowtile.png');
|
||||
opacity: 1.0;
|
||||
}
|
||||
|
||||
div.addtorrentsection {
|
||||
margin: 0 0 10px 0;
|
||||
padding: 10px;
|
||||
background: #ffe;
|
||||
padding: 0 10px 10px 10px;
|
||||
border: 1px solid #001;
|
||||
text-align: center;
|
||||
color: #001;
|
||||
@ -286,15 +254,13 @@ div.addtorrentsection {
|
||||
border-radius: 4px;
|
||||
-moz-box-shadow: inset 0px 0px 1px 0px #002;
|
||||
word-wrap: break-word;
|
||||
text-align: center;
|
||||
background: #ffe url('../console/images/greentile.png');
|
||||
background: #7f7 url('../console/images/greentile.png');
|
||||
opacity: 1.0;
|
||||
}
|
||||
|
||||
div.configsection {
|
||||
margin: 0;
|
||||
padding: 10px;
|
||||
background: #ffe;
|
||||
padding: 0 10px 10px 10px;
|
||||
border: 1px solid #001;
|
||||
color: #ffb;
|
||||
-moz-border-radius: 4px;
|
||||
@ -303,7 +269,7 @@ div.configsection {
|
||||
-moz-box-shadow: inset 0px 0px 0px 1px #900;
|
||||
word-wrap: break-word;
|
||||
text-align: center;
|
||||
background: #ffe url('../console/light/images/darkbluetile.png');
|
||||
background: #700 url('../console/light/images/darkbluetile.png');
|
||||
font-weight: bold;/* red tile needs bold text! */
|
||||
}
|
||||
|
||||
@ -323,7 +289,6 @@ div.configsection a:hover{
|
||||
-moz-border-radius: 4px;
|
||||
-khtml-border-radius: 4px;
|
||||
border-radius: 4px;
|
||||
background: #eef;
|
||||
-moz-box-shadow: inset 0px 0px 1px 0px #002;
|
||||
background: #ddf url('../console/light/images/tabletile.png');
|
||||
text-transform: uppercase !important;
|
||||
|
@ -85,13 +85,13 @@ public abstract class ClientManagerFacade implements Service {
|
||||
*
|
||||
* @return set of Destination objects
|
||||
*/
|
||||
public Set listClients() { return Collections.EMPTY_SET; }
|
||||
public Set<Destination> listClients() { return Collections.EMPTY_SET; }
|
||||
|
||||
/**
|
||||
* Return the client's current config, or null if not connected
|
||||
*
|
||||
*/
|
||||
public abstract SessionConfig getClientSessionConfig(Destination dest);
|
||||
public abstract SessionKeyManager getClientSessionKeyManager(Destination dest);
|
||||
public abstract SessionKeyManager getClientSessionKeyManager(Hash dest);
|
||||
public void renderStatusHTML(Writer out) throws IOException { }
|
||||
}
|
||||
|
@ -41,7 +41,7 @@ public class DummyClientManagerFacade extends ClientManagerFacade {
|
||||
public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) {}
|
||||
|
||||
public SessionConfig getClientSessionConfig(Destination _dest) { return null; }
|
||||
public SessionKeyManager getClientSessionKeyManager(Destination _dest) { return null; }
|
||||
public SessionKeyManager getClientSessionKeyManager(Hash _dest) { return null; }
|
||||
|
||||
public void requestLeaseSet(Hash dest, LeaseSet set) {}
|
||||
|
||||
|
@ -18,7 +18,7 @@ public class RouterVersion {
|
||||
/** deprecated */
|
||||
public final static String ID = "Monotone";
|
||||
public final static String VERSION = CoreVersion.VERSION;
|
||||
public final static long BUILD = 16;
|
||||
public final static long BUILD = 20;
|
||||
/** for example "-test" */
|
||||
public final static String EXTRA = "";
|
||||
public final static String FULL_VERSION = VERSION + "-" + BUILD + EXTRA;
|
||||
|
@ -18,6 +18,7 @@ import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.crypto.SessionKeyManager;
|
||||
import net.i2p.crypto.TransientSessionKeyManager;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.LeaseSet;
|
||||
@ -188,11 +189,11 @@ public class ClientConnectionRunner {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("SessionEstablished called for destination " + _destHashCache.toBase64());
|
||||
_config = config;
|
||||
// per-dest unimplemented
|
||||
//if (_sessionKeyManager == null)
|
||||
// _sessionKeyManager = new TransientSessionKeyManager(_context);
|
||||
//else
|
||||
// _log.error("SessionEstablished called for twice for destination " + _destHashCache.toBase64().substring(0,4));
|
||||
// per-destination session key manager to prevent rather easy correlation
|
||||
if (_sessionKeyManager == null)
|
||||
_sessionKeyManager = new TransientSessionKeyManager(_context);
|
||||
else
|
||||
_log.error("SessionEstablished called for twice for destination " + _destHashCache.toBase64().substring(0,4));
|
||||
_manager.destinationEstablished(this);
|
||||
}
|
||||
|
||||
|
@ -42,8 +42,8 @@ import net.i2p.util.Log;
|
||||
public class ClientManager {
|
||||
private Log _log;
|
||||
private ClientListenerRunner _listener;
|
||||
private final HashMap _runners; // Destination --> ClientConnectionRunner
|
||||
private final Set _pendingRunners; // ClientConnectionRunner for clients w/out a Dest yet
|
||||
private final HashMap<Destination, ClientConnectionRunner> _runners; // Destination --> ClientConnectionRunner
|
||||
private final Set<ClientConnectionRunner> _pendingRunners; // ClientConnectionRunner for clients w/out a Dest yet
|
||||
private RouterContext _ctx;
|
||||
|
||||
/** ms to wait before rechecking for inbound messages to deliver to clients */
|
||||
@ -90,21 +90,21 @@ public class ClientManager {
|
||||
public void shutdown() {
|
||||
_log.info("Shutting down the ClientManager");
|
||||
_listener.stopListening();
|
||||
Set runners = new HashSet();
|
||||
Set<ClientConnectionRunner> runners = new HashSet();
|
||||
synchronized (_runners) {
|
||||
for (Iterator iter = _runners.values().iterator(); iter.hasNext();) {
|
||||
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
|
||||
for (Iterator<ClientConnectionRunner> iter = _runners.values().iterator(); iter.hasNext();) {
|
||||
ClientConnectionRunner runner = iter.next();
|
||||
runners.add(runner);
|
||||
}
|
||||
}
|
||||
synchronized (_pendingRunners) {
|
||||
for (Iterator iter = _pendingRunners.iterator(); iter.hasNext();) {
|
||||
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
|
||||
for (Iterator<ClientConnectionRunner> iter = _pendingRunners.iterator(); iter.hasNext();) {
|
||||
ClientConnectionRunner runner = iter.next();
|
||||
runners.add(runner);
|
||||
}
|
||||
}
|
||||
for (Iterator iter = runners.iterator(); iter.hasNext(); ) {
|
||||
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
|
||||
for (Iterator<ClientConnectionRunner> iter = runners.iterator(); iter.hasNext(); ) {
|
||||
ClientConnectionRunner runner = iter.next();
|
||||
runner.stopRunning();
|
||||
}
|
||||
}
|
||||
@ -131,15 +131,26 @@ public class ClientManager {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Add to the clients list. Check for a dup destination.
|
||||
*/
|
||||
public void destinationEstablished(ClientConnectionRunner runner) {
|
||||
Destination dest = runner.getConfig().getDestination();
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("DestinationEstablished called for destination " + runner.getConfig().getDestination().calculateHash().toBase64());
|
||||
_log.debug("DestinationEstablished called for destination " + dest.calculateHash().toBase64());
|
||||
|
||||
synchronized (_pendingRunners) {
|
||||
_pendingRunners.remove(runner);
|
||||
}
|
||||
boolean fail = false;
|
||||
synchronized (_runners) {
|
||||
_runners.put(runner.getConfig().getDestination(), runner);
|
||||
fail = _runners.containsKey(dest);
|
||||
if (!fail)
|
||||
_runners.put(dest, runner);
|
||||
}
|
||||
if (fail) {
|
||||
_log.log(Log.CRIT, "Client attempted to register duplicate destination " + dest.calculateHash().toBase64());
|
||||
runner.disconnectClient("Duplicate destination");
|
||||
}
|
||||
}
|
||||
|
||||
@ -278,8 +289,8 @@ public class ClientManager {
|
||||
return true;
|
||||
}
|
||||
|
||||
public Set listClients() {
|
||||
Set rv = new HashSet();
|
||||
public Set<Destination> listClients() {
|
||||
Set<Destination> rv = new HashSet();
|
||||
synchronized (_runners) {
|
||||
rv.addAll(_runners.keySet());
|
||||
}
|
||||
@ -293,7 +304,7 @@ public class ClientManager {
|
||||
long inLock = 0;
|
||||
synchronized (_runners) {
|
||||
inLock = _ctx.clock().now();
|
||||
rv = (ClientConnectionRunner)_runners.get(dest);
|
||||
rv = _runners.get(dest);
|
||||
}
|
||||
long afterLock = _ctx.clock().now();
|
||||
if (afterLock - beforeLock > 50) {
|
||||
@ -317,9 +328,10 @@ public class ClientManager {
|
||||
|
||||
/**
|
||||
* Return the client's SessionKeyManager
|
||||
*
|
||||
* Use this instead of the RouterContext.sessionKeyManager()
|
||||
* to prevent correlation attacks across destinations
|
||||
*/
|
||||
public SessionKeyManager getClientSessionKeyManager(Destination dest) {
|
||||
public SessionKeyManager getClientSessionKeyManager(Hash dest) {
|
||||
ClientConnectionRunner runner = getRunner(dest);
|
||||
if (runner != null)
|
||||
return runner.getSessionKeyManager();
|
||||
@ -331,8 +343,8 @@ public class ClientManager {
|
||||
if (destHash == null)
|
||||
return null;
|
||||
synchronized (_runners) {
|
||||
for (Iterator iter = _runners.values().iterator(); iter.hasNext(); ) {
|
||||
ClientConnectionRunner cur = (ClientConnectionRunner)iter.next();
|
||||
for (Iterator<ClientConnectionRunner> iter = _runners.values().iterator(); iter.hasNext(); ) {
|
||||
ClientConnectionRunner cur = iter.next();
|
||||
if (cur.getDestHash().equals(destHash))
|
||||
return cur;
|
||||
}
|
||||
@ -354,8 +366,8 @@ public class ClientManager {
|
||||
}
|
||||
}
|
||||
|
||||
Set getRunnerDestinations() {
|
||||
Set dests = new HashSet();
|
||||
Set<Destination> getRunnerDestinations() {
|
||||
Set<Destination> dests = new HashSet();
|
||||
long beforeLock = _ctx.clock().now();
|
||||
long inLock = 0;
|
||||
synchronized (_runners) {
|
||||
@ -390,13 +402,13 @@ public class ClientManager {
|
||||
StringBuilder buf = new StringBuilder(8*1024);
|
||||
buf.append("<u><b>Local destinations</b></u><br>");
|
||||
|
||||
Map runners = null;
|
||||
Map<Destination, ClientConnectionRunner> runners = null;
|
||||
synchronized (_runners) {
|
||||
runners = (Map)_runners.clone();
|
||||
}
|
||||
for (Iterator iter = runners.keySet().iterator(); iter.hasNext(); ) {
|
||||
Destination dest = (Destination)iter.next();
|
||||
ClientConnectionRunner runner = (ClientConnectionRunner)runners.get(dest);
|
||||
for (Iterator<Destination> iter = runners.keySet().iterator(); iter.hasNext(); ) {
|
||||
Destination dest = iter.next();
|
||||
ClientConnectionRunner runner = runners.get(dest);
|
||||
buf.append("<b>*</b> ").append(dest.calculateHash().toBase64().substring(0,6)).append("<br>\n");
|
||||
LeaseSet ls = runner.getLeaseSet();
|
||||
if (ls == null) {
|
||||
|
@ -194,7 +194,7 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
* Return the client's current manager or null if not connected
|
||||
*
|
||||
*/
|
||||
public SessionKeyManager getClientSessionKeyManager(Destination dest) {
|
||||
public SessionKeyManager getClientSessionKeyManager(Hash dest) {
|
||||
if (_manager != null)
|
||||
return _manager.getClientSessionKeyManager(dest);
|
||||
else {
|
||||
@ -215,7 +215,7 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
* @return set of Destination objects
|
||||
*/
|
||||
@Override
|
||||
public Set listClients() {
|
||||
public Set<Destination> listClients() {
|
||||
if (_manager != null)
|
||||
return _manager.listClients();
|
||||
else
|
||||
|
@ -17,7 +17,7 @@ import java.util.Set;
|
||||
import net.i2p.crypto.SessionKeyManager;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.PublicKey;
|
||||
import net.i2p.data.SessionKey;
|
||||
import net.i2p.data.SessionTag;
|
||||
@ -59,14 +59,16 @@ public class GarlicMessageBuilder {
|
||||
*
|
||||
* So a value somewhat higher than the low threshold
|
||||
* seems appropriate.
|
||||
*
|
||||
* Use care when adjusting these values. See ConnectionOptions in streaming,
|
||||
* and TransientSessionKeyManager in crypto, for more information.
|
||||
*/
|
||||
private static final int DEFAULT_TAGS = 40;
|
||||
private static final int LOW_THRESHOLD = 20;
|
||||
private static final int LOW_THRESHOLD = 30;
|
||||
|
||||
public static int estimateAvailableTags(RouterContext ctx, PublicKey key, Destination local) {
|
||||
// per-dest Unimplemented
|
||||
//SessionKeyManager skm = ctx.clientManager().getClientSessionKeyManager(local);
|
||||
SessionKeyManager skm = ctx.sessionKeyManager();
|
||||
/** @param local non-null; do not use this method for the router's SessionKeyManager */
|
||||
public static int estimateAvailableTags(RouterContext ctx, PublicKey key, Hash local) {
|
||||
SessionKeyManager skm = ctx.clientManager().getClientSessionKeyManager(local);
|
||||
if (skm == null)
|
||||
return 0;
|
||||
SessionKey curKey = skm.getCurrentKey(key);
|
||||
@ -75,19 +77,54 @@ public class GarlicMessageBuilder {
|
||||
return skm.getAvailableTags(key, curKey);
|
||||
}
|
||||
|
||||
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config) {
|
||||
return buildMessage(ctx, config, new SessionKey(), new HashSet());
|
||||
/**
|
||||
* Unused and probably a bad idea.
|
||||
*
|
||||
* Used below only on a recursive call if the garlic message contains a garlic message.
|
||||
* We don't need the SessionKey or SesssionTags returned
|
||||
* This uses the router's SKM, which is probably not what you want.
|
||||
* This isn't fully implemented, because the key and tags aren't saved - maybe
|
||||
* it should force elGamal?
|
||||
*
|
||||
* @param ctx scope
|
||||
* @param config how/what to wrap
|
||||
*/
|
||||
private static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config) {
|
||||
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
|
||||
log.error("buildMessage 2 args, using router SKM", new Exception("who did it"));
|
||||
return buildMessage(ctx, config, new SessionKey(), new HashSet(), ctx.sessionKeyManager());
|
||||
}
|
||||
|
||||
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags) {
|
||||
return buildMessage(ctx, config, wrappedKey, wrappedTags, DEFAULT_TAGS);
|
||||
/**
|
||||
* called by OCMJH
|
||||
*
|
||||
* @param ctx scope
|
||||
* @param config how/what to wrap
|
||||
* @param wrappedKey output parameter that will be filled with the sessionKey used
|
||||
* @param wrappedTags output parameter that will be filled with the sessionTags used
|
||||
*/
|
||||
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set<SessionTag> wrappedTags,
|
||||
SessionKeyManager skm) {
|
||||
return buildMessage(ctx, config, wrappedKey, wrappedTags, DEFAULT_TAGS, false, skm);
|
||||
}
|
||||
|
||||
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags, int numTagsToDeliver) {
|
||||
/** unused */
|
||||
/***
|
||||
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags,
|
||||
int numTagsToDeliver) {
|
||||
return buildMessage(ctx, config, wrappedKey, wrappedTags, numTagsToDeliver, false);
|
||||
}
|
||||
***/
|
||||
|
||||
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags, int numTagsToDeliver, boolean forceElGamal) {
|
||||
/**
|
||||
* @param ctx scope
|
||||
* @param config how/what to wrap
|
||||
* @param wrappedKey output parameter that will be filled with the sessionKey used
|
||||
* @param wrappedTags output parameter that will be filled with the sessionTags used
|
||||
* @param numTagsToDeliver only if the estimated available tags are below the threshold
|
||||
*/
|
||||
private static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set<SessionTag> wrappedTags,
|
||||
int numTagsToDeliver, boolean forceElGamal, SessionKeyManager skm) {
|
||||
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
|
||||
PublicKey key = config.getRecipientPublicKey();
|
||||
if (key == null) {
|
||||
@ -104,14 +141,14 @@ public class GarlicMessageBuilder {
|
||||
if (log.shouldLog(Log.INFO))
|
||||
log.info("Encrypted with public key " + key + " to expire on " + new Date(config.getExpiration()));
|
||||
|
||||
SessionKey curKey = ctx.sessionKeyManager().getCurrentKey(key);
|
||||
SessionKey curKey = skm.getCurrentKey(key);
|
||||
SessionTag curTag = null;
|
||||
if (curKey == null)
|
||||
curKey = ctx.sessionKeyManager().createSession(key);
|
||||
curKey = skm.createSession(key);
|
||||
if (!forceElGamal) {
|
||||
curTag = ctx.sessionKeyManager().consumeNextAvailableTag(key, curKey);
|
||||
curTag = skm.consumeNextAvailableTag(key, curKey);
|
||||
|
||||
int availTags = ctx.sessionKeyManager().getAvailableTags(key, curKey);
|
||||
int availTags = skm.getAvailableTags(key, curKey);
|
||||
if (log.shouldLog(Log.DEBUG))
|
||||
log.debug("Available tags for encryption to " + key + ": " + availTags);
|
||||
|
||||
@ -120,7 +157,7 @@ public class GarlicMessageBuilder {
|
||||
wrappedTags.add(new SessionTag(true));
|
||||
if (log.shouldLog(Log.INFO))
|
||||
log.info("Too few are available (" + availTags + "), so we're including more");
|
||||
} else if (ctx.sessionKeyManager().getAvailableTimeLeft(key, curKey) < 60*1000) {
|
||||
} else if (skm.getAvailableTimeLeft(key, curKey) < 60*1000) {
|
||||
// if we have enough tags, but they expire in under 30 seconds, we want more
|
||||
for (int i = 0; i < numTagsToDeliver; i++)
|
||||
wrappedTags.add(new SessionTag(true));
|
||||
@ -138,16 +175,19 @@ public class GarlicMessageBuilder {
|
||||
}
|
||||
|
||||
/**
|
||||
* used by TestJob and directly above
|
||||
*
|
||||
* @param ctx scope
|
||||
* @param config how/what to wrap
|
||||
* @param wrappedKey output parameter that will be filled with the sessionKey used
|
||||
* @param wrappedKey unused - why??
|
||||
* @param wrappedTags output parameter that will be filled with the sessionTags used
|
||||
* @param target public key of the location being garlic routed to (may be null if we
|
||||
* know the encryptKey and encryptTag)
|
||||
* @param encryptKey sessionKey used to encrypt the current message
|
||||
* @param encryptTag sessionTag used to encrypt the current message
|
||||
*/
|
||||
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags, PublicKey target, SessionKey encryptKey, SessionTag encryptTag) {
|
||||
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set<SessionTag> wrappedTags,
|
||||
PublicKey target, SessionKey encryptKey, SessionTag encryptTag) {
|
||||
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
|
||||
if (config == null)
|
||||
throw new IllegalArgumentException("Null config specified");
|
||||
@ -209,6 +249,7 @@ public class GarlicMessageBuilder {
|
||||
cloves[i] = buildClove(ctx, (PayloadGarlicConfig)c);
|
||||
} else {
|
||||
log.debug("Subclove IS NOT a payload garlic clove");
|
||||
// See notes below
|
||||
cloves[i] = buildClove(ctx, c);
|
||||
}
|
||||
if (cloves[i] == null)
|
||||
@ -242,6 +283,22 @@ public class GarlicMessageBuilder {
|
||||
return buildCommonClove(ctx, clove, config);
|
||||
}
|
||||
|
||||
/**
|
||||
* UNUSED
|
||||
*
|
||||
* The Garlic Message we are building contains another garlic message,
|
||||
* as specified by a GarlicConfig (NOT a PayloadGarlicConfig).
|
||||
*
|
||||
* So this calls back to the top, to buildMessage(ctx, config),
|
||||
* which uses the router's SKM, i.e. the wrong one.
|
||||
* Unfortunately we've lost the reference to the SessionKeyManager way down here,
|
||||
* so we can't call buildMessage(ctx, config, key, tags, skm).
|
||||
*
|
||||
* If we do ever end up constructing a garlic message that contains a garlic message,
|
||||
* we'll have to fix this by passing the skm through the last buildMessage,
|
||||
* through buildCloveSet, to here.
|
||||
*
|
||||
*/
|
||||
private static byte[] buildClove(RouterContext ctx, GarlicConfig config) throws DataFormatException, IOException {
|
||||
GarlicClove clove = new GarlicClove(ctx);
|
||||
GarlicMessage msg = buildMessage(ctx, config);
|
||||
|
@ -10,6 +10,7 @@ package net.i2p.router.message;
|
||||
|
||||
import java.util.Date;
|
||||
|
||||
import net.i2p.crypto.SessionKeyManager;
|
||||
import net.i2p.data.Certificate;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.DataHelper;
|
||||
@ -32,13 +33,14 @@ public class GarlicMessageParser {
|
||||
_log = _context.logManager().getLog(GarlicMessageParser.class);
|
||||
}
|
||||
|
||||
public CloveSet getGarlicCloves(GarlicMessage message, PrivateKey encryptionKey) {
|
||||
/** @param skm use tags from this session key manager */
|
||||
public CloveSet getGarlicCloves(GarlicMessage message, PrivateKey encryptionKey, SessionKeyManager skm) {
|
||||
byte encData[] = message.getData();
|
||||
byte decrData[] = null;
|
||||
try {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Decrypting with private key " + encryptionKey);
|
||||
decrData = _context.elGamalAESEngine().decrypt(encData, encryptionKey);
|
||||
decrData = _context.elGamalAESEngine().decrypt(encData, encryptionKey, skm);
|
||||
} catch (DataFormatException dfe) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Error decrypting", dfe);
|
||||
|
@ -8,6 +8,7 @@ package net.i2p.router.message;
|
||||
*
|
||||
*/
|
||||
|
||||
import net.i2p.crypto.SessionKeyManager;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.PrivateKey;
|
||||
@ -47,13 +48,16 @@ public class GarlicMessageReceiver {
|
||||
_clientDestination = clientDestination;
|
||||
_parser = new GarlicMessageParser(context);
|
||||
_receiver = receiver;
|
||||
//_log.error("New GMR dest = " + clientDestination);
|
||||
}
|
||||
|
||||
public void receive(GarlicMessage message) {
|
||||
PrivateKey decryptionKey = null;
|
||||
SessionKeyManager skm = null;
|
||||
if (_clientDestination != null) {
|
||||
LeaseSetKeys keys = _context.keyManager().getKeys(_clientDestination);
|
||||
if (keys != null) {
|
||||
skm = _context.clientManager().getClientSessionKeyManager(_clientDestination);
|
||||
if (keys != null && skm != null) {
|
||||
decryptionKey = keys.getDecryptionKey();
|
||||
} else {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
@ -62,9 +66,10 @@ public class GarlicMessageReceiver {
|
||||
}
|
||||
} else {
|
||||
decryptionKey = _context.keyManager().getPrivateKey();
|
||||
skm = _context.sessionKeyManager();
|
||||
}
|
||||
|
||||
CloveSet set = _parser.getGarlicCloves(message, decryptionKey);
|
||||
CloveSet set = _parser.getGarlicCloves(message, decryptionKey, skm);
|
||||
if (set != null) {
|
||||
for (int i = 0; i < set.getCloveCount(); i++) {
|
||||
GarlicClove clove = set.getClove(i);
|
||||
|
@ -31,14 +31,18 @@ import net.i2p.util.Log;
|
||||
public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageReceiver.CloveReceiver {
|
||||
private Log _log;
|
||||
private GarlicMessage _message;
|
||||
private RouterIdentity _from;
|
||||
private Hash _fromHash;
|
||||
private Map _cloves; // map of clove Id --> Expiration of cloves we've already seen
|
||||
//private RouterIdentity _from;
|
||||
//private Hash _fromHash;
|
||||
//private Map _cloves; // map of clove Id --> Expiration of cloves we've already seen
|
||||
//private MessageHandler _handler;
|
||||
private GarlicMessageParser _parser;
|
||||
//private GarlicMessageParser _parser;
|
||||
|
||||
private final static int FORWARD_PRIORITY = 50;
|
||||
|
||||
/**
|
||||
* @param from ignored
|
||||
* @param fromHash ignored
|
||||
*/
|
||||
public HandleGarlicMessageJob(RouterContext context, GarlicMessage msg, RouterIdentity from, Hash fromHash) {
|
||||
super(context);
|
||||
_log = context.logManager().getLog(HandleGarlicMessageJob.class);
|
||||
@ -46,11 +50,11 @@ public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageRece
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("New handle garlicMessageJob called w/ message from [" + from + "]", new Exception("Debug"));
|
||||
_message = msg;
|
||||
_from = from;
|
||||
_fromHash = fromHash;
|
||||
_cloves = new HashMap();
|
||||
//_from = from;
|
||||
//_fromHash = fromHash;
|
||||
//_cloves = new HashMap();
|
||||
//_handler = new MessageHandler(context);
|
||||
_parser = new GarlicMessageParser(context);
|
||||
//_parser = new GarlicMessageParser(context);
|
||||
}
|
||||
|
||||
public String getName() { return "Handle Inbound Garlic Message"; }
|
||||
|
@ -17,6 +17,7 @@ import net.i2p.data.LeaseSet;
|
||||
import net.i2p.data.Payload;
|
||||
import net.i2p.data.PublicKey;
|
||||
import net.i2p.data.SessionKey;
|
||||
import net.i2p.data.SessionTag;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.data.i2np.DataMessage;
|
||||
import net.i2p.data.i2np.DatabaseStoreMessage;
|
||||
@ -46,13 +47,15 @@ class OutboundClientMessageJobHelper {
|
||||
*
|
||||
* For now, its just a tunneled DeliveryStatusMessage
|
||||
*
|
||||
* Unused?
|
||||
*
|
||||
* @param bundledReplyLeaseSet if specified, the given LeaseSet will be packaged with the message (allowing
|
||||
* much faster replies, since their netDb search will return almost instantly)
|
||||
* @return garlic, or null if no tunnels were found (or other errors)
|
||||
*/
|
||||
static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK,
|
||||
Payload data, Hash from, Destination dest, TunnelInfo replyTunnel,
|
||||
SessionKey wrappedKey, Set wrappedTags,
|
||||
SessionKey wrappedKey, Set<SessionTag> wrappedTags,
|
||||
boolean requireAck, LeaseSet bundledReplyLeaseSet) {
|
||||
PayloadGarlicConfig dataClove = buildDataClove(ctx, data, dest, expiration);
|
||||
return createGarlicMessage(ctx, replyToken, expiration, recipientPK, dataClove, from, dest, replyTunnel, wrappedKey,
|
||||
@ -62,15 +65,18 @@ class OutboundClientMessageJobHelper {
|
||||
* Allow the app to specify the data clove directly, which enables OutboundClientMessage to resend the
|
||||
* same payload (including expiration and unique id) in different garlics (down different tunnels)
|
||||
*
|
||||
* This is called from OCMOSJ
|
||||
*
|
||||
* @return garlic, or null if no tunnels were found (or other errors)
|
||||
*/
|
||||
static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK,
|
||||
PayloadGarlicConfig dataClove, Hash from, Destination dest, TunnelInfo replyTunnel, SessionKey wrappedKey,
|
||||
Set wrappedTags, boolean requireAck, LeaseSet bundledReplyLeaseSet) {
|
||||
Set<SessionTag> wrappedTags, boolean requireAck, LeaseSet bundledReplyLeaseSet) {
|
||||
GarlicConfig config = createGarlicConfig(ctx, replyToken, expiration, recipientPK, dataClove, from, dest, replyTunnel, requireAck, bundledReplyLeaseSet);
|
||||
if (config == null)
|
||||
return null;
|
||||
GarlicMessage msg = GarlicMessageBuilder.buildMessage(ctx, config, wrappedKey, wrappedTags);
|
||||
GarlicMessage msg = GarlicMessageBuilder.buildMessage(ctx, config, wrappedKey, wrappedTags,
|
||||
ctx.clientManager().getClientSessionKeyManager(from));
|
||||
return msg;
|
||||
}
|
||||
|
||||
|
@ -10,6 +10,8 @@ import java.util.Map;
import java.util.Properties;
import java.util.Set;

import net.i2p.crypto.SessionKeyManager;
import net.i2p.crypto.TagSetHandle;
import net.i2p.data.Base64;
import net.i2p.data.Certificate;
import net.i2p.data.Destination;
@ -20,6 +22,7 @@ import net.i2p.data.Payload;
import net.i2p.data.PublicKey;
import net.i2p.data.RouterInfo;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
import net.i2p.data.i2cp.MessageId;
import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.DeliveryInstructions;
@ -471,7 +474,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
return;
}

int existingTags = GarlicMessageBuilder.estimateAvailableTags(getContext(), _leaseSet.getEncryptionKey(), _from);
int existingTags = GarlicMessageBuilder.estimateAvailableTags(getContext(), _leaseSet.getEncryptionKey(),
_from.calculateHash());
_outTunnel = selectOutboundTunnel(_to);
// boolean wantACK = _wantACK || existingTags <= 30 || getContext().random().nextInt(100) < 5;
// what's the point of 5% random? possible improvements or replacements:
@ -489,7 +493,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {

PublicKey key = _leaseSet.getEncryptionKey();
SessionKey sessKey = new SessionKey();
Set tags = new HashSet();
Set<SessionTag> tags = new HashSet();
// If we want an ack, bundle a leaseSet... (so he can get back to us)
LeaseSet replyLeaseSet = getReplyLeaseSet(wantACK);
// ... and vice versa (so we know he got it)
@ -531,8 +535,16 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
SendTimeoutJob onFail = null;
ReplySelector selector = null;
if (wantACK) {
onReply = new SendSuccessJob(getContext(), sessKey, tags);
onFail = new SendTimeoutJob(getContext());
TagSetHandle tsh = null;
if ( (sessKey != null) && (tags != null) && (tags.size() > 0) ) {
if (_leaseSet != null) {
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_from.calculateHash());
if (skm != null)
tsh = skm.tagsDelivered(_leaseSet.getEncryptionKey(), sessKey, tags);
}
}
onReply = new SendSuccessJob(getContext(), sessKey, tsh);
onFail = new SendTimeoutJob(getContext(), sessKey, tsh);
selector = new ReplySelector(token);
}

@ -550,9 +562,9 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
+ _lease.getGateway().toBase64());

DispatchJob dispatchJob = new DispatchJob(getContext(), msg, selector, onReply, onFail, (int)(_overallExpiration-getContext().clock().now()));
if (false) // dispatch may take 100+ms, so toss it in its own job
getContext().jobQueue().addJob(dispatchJob);
else
//if (false) // dispatch may take 100+ms, so toss it in its own job
// getContext().jobQueue().addJob(dispatchJob);
//else
dispatchJob.runJob();
} else {
if (_log.shouldLog(Log.WARN))
@ -848,6 +860,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {

/** build the payload clove that will be used for all of the messages, placing the clove in the status structure */
private boolean buildClove() {
// FIXME set SKM
PayloadGarlicConfig clove = new PayloadGarlicConfig();

DeliveryInstructions instructions = new DeliveryInstructions();
@ -932,14 +945,14 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
*/
private class SendSuccessJob extends JobImpl implements ReplyJob {
private SessionKey _key;
private Set _tags;
private TagSetHandle _tags;

/**
* Create a new success job that will be fired when the message encrypted with
* the given session key and bearing the specified tags are confirmed delivered.
*
*/
public SendSuccessJob(RouterContext enclosingContext, SessionKey key, Set tags) {
public SendSuccessJob(RouterContext enclosingContext, SessionKey key, TagSetHandle tags) {
super(enclosingContext);
_key = key;
_tags = tags;
@ -955,10 +968,10 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
+ ": SUCCESS! msg " + _clientMessageId
+ " sent after " + sendTime + "ms");

if ( (_key != null) && (_tags != null) && (_tags.size() > 0) ) {
if (_leaseSet != null)
getContext().sessionKeyManager().tagsDelivered(_leaseSet.getEncryptionKey(),
_key, _tags);
if (_key != null && _tags != null && _leaseSet != null) {
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_from.calculateHash());
if (skm != null)
skm.tagsAcked(_leaseSet.getEncryptionKey(), _key, _tags);
}

long dataMsgId = _cloveId;
@ -994,8 +1007,13 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
*
*/
private class SendTimeoutJob extends JobImpl {
public SendTimeoutJob(RouterContext enclosingContext) {
private SessionKey _key;
private TagSetHandle _tags;

public SendTimeoutJob(RouterContext enclosingContext, SessionKey key, TagSetHandle tags) {
super(enclosingContext);
_key = key;
_tags = tags;
}

public String getName() { return "Send client message timed out"; }
@ -1005,6 +1023,11 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
+ ": Soft timeout through the lease " + _lease);

_lease.setNumFailure(_lease.getNumFailure()+1);
if (_key != null && _tags != null && _leaseSet != null) {
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_from.calculateHash());
if (skm != null)
skm.failTags(_leaseSet.getEncryptionKey(), _key, _tags);
}
dieFatal();
}
}

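Not part of the diff: the tag bookkeeping these hunks add, condensed into one hypothetical helper for readability. Only calls that actually appear above are used (tagsDelivered at send time, tagsAcked from SendSuccessJob, failTags from SendTimeoutJob); the helper itself does not exist in the commit.

    // Hypothetical summary of the new lifecycle: tags are marked delivered when the
    // garlic goes out, then either acked (reply came back) or failed (soft timeout).
    private void markTags(SessionKey sessKey, Set<SessionTag> tags, boolean acked) {
        if (_leaseSet == null || tags == null || tags.isEmpty())
            return;
        SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_from.calculateHash());
        if (skm == null)
            return; // client gone; nothing to track
        TagSetHandle tsh = skm.tagsDelivered(_leaseSet.getEncryptionKey(), sessKey, tags);
        if (acked)
            skm.tagsAcked(_leaseSet.getEncryptionKey(), sessKey, tsh);  // SendSuccessJob path
        else
            skm.failTags(_leaseSet.getEncryptionKey(), sessKey, tsh);   // SendTimeoutJob path
    }
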
@ -0,0 +1,225 @@
package net.i2p.router.networkdb.kademlia;

import java.util.ArrayList;
import java.util.List;

import net.i2p.data.Hash;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageSelector;
import net.i2p.router.OutNetMessage;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;

/**
* Try sending a search to some floodfill peers, but if we don't get a successful
* match within half the allowed lookup time, give up and start querying through
* the normal (kademlia) channels. This should cut down on spurious lookups caused
* by simple delays in responses from floodfill peers
*
*/
public class FloodSearchJob extends JobImpl {
private Log _log;
private FloodfillNetworkDatabaseFacade _facade;
private Hash _key;
private final List _onFind;
private final List _onFailed;
private long _expiration;
private int _timeoutMs;
private long _origExpiration;
private boolean _isLease;
private volatile int _lookupsRemaining;
private volatile boolean _dead;
public FloodSearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
super(ctx);
_log = ctx.logManager().getLog(FloodSearchJob.class);
_facade = facade;
_key = key;
_onFind = new ArrayList();
_onFind.add(onFind);
_onFailed = new ArrayList();
_onFailed.add(onFailed);
int timeout = -1;
timeout = timeoutMs / FLOOD_SEARCH_TIME_FACTOR;
if (timeout < timeoutMs)
timeout = timeoutMs;
_timeoutMs = timeout;
_expiration = timeout + ctx.clock().now();
_origExpiration = timeoutMs + ctx.clock().now();
_isLease = isLease;
_lookupsRemaining = 0;
_dead = false;
}
void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
if (_dead) {
getContext().jobQueue().addJob(onFailed);
} else {
if (onFind != null) synchronized (_onFind) { _onFind.add(onFind); }
if (onFailed != null) synchronized (_onFailed) { _onFailed.add(onFailed); }
}
}
public long getExpiration() { return _expiration; }
private static final int CONCURRENT_SEARCHES = 2;
private static final int FLOOD_SEARCH_TIME_FACTOR = 2;
private static final int FLOOD_SEARCH_TIME_MIN = 30*1000;
public void runJob() {
// pick some floodfill peers and send out the searches
List floodfillPeers = _facade.getFloodfillPeers();
FloodLookupSelector replySelector = new FloodLookupSelector(getContext(), this);
ReplyJob onReply = new FloodLookupMatchJob(getContext(), this);
Job onTimeout = new FloodLookupTimeoutJob(getContext(), this);
OutNetMessage out = getContext().messageRegistry().registerPending(replySelector, onReply, onTimeout, _timeoutMs);

for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
Hash peer = (Hash)floodfillPeers.get(i);
if (peer.equals(getContext().routerHash()))
continue;

DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel();
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if ( (replyTunnel == null) || (outTunnel == null) ) {
_dead = true;
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
getContext().messageRegistry().unregisterPending(out);
return;
}
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setMessageExpiration(getContext().clock().now()+10*1000);
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
dlm.setSearchKey(_key);

if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " to " + peer.toBase64());
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), peer);
_lookupsRemaining++;
}

if (_lookupsRemaining <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " had no peers to send to");
// no floodfill peers, go to the normal ones
getContext().messageRegistry().unregisterPending(out);
_facade.searchFull(_key, _onFind, _onFailed, _timeoutMs*FLOOD_SEARCH_TIME_FACTOR, _isLease);
}
}
public String getName() { return "NetDb search (phase 1)"; }

Hash getKey() { return _key; }
void decrementRemaining() { _lookupsRemaining--; }
int getLookupsRemaining() { return _lookupsRemaining; }

void failed() {
if (_dead) return;
_dead = true;
int timeRemaining = (int)(_origExpiration - getContext().clock().now());
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " failed with " + timeRemaining);
if (timeRemaining > 0) {
_facade.searchFull(_key, _onFind, _onFailed, timeRemaining, _isLease);
} else {
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}
void success() {
if (_dead) return;
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " successful");
_dead = true;
_facade.complete(_key);
List removed = null;
synchronized (_onFind) {
removed = new ArrayList(_onFind);
_onFind.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}

private static class FloodLookupTimeoutJob extends JobImpl {
private FloodSearchJob _search;
public FloodLookupTimeoutJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_search = job;
}
public void runJob() {
_search.decrementRemaining();
if (_search.getLookupsRemaining() <= 0)
_search.failed();
}
public String getName() { return "NetDb search (phase 1) timeout"; }
}

private static class FloodLookupMatchJob extends JobImpl implements ReplyJob {
private Log _log;
private FloodSearchJob _search;
public FloodLookupMatchJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_log = ctx.logManager().getLog(FloodLookupMatchJob.class);
_search = job;
}
public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
_search.success();
} else {
int remaining = _search.getLookupsRemaining();
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + "/" + _search.getJobId() + ": got a reply looking for "
+ _search.getKey().toBase64() + ", with " + remaining + " outstanding searches");
// netDb reply pointing us at other people
if (remaining <= 0)
_search.failed();
}
}
public String getName() { return "NetDb search (phase 1) match"; }
public void setMessage(I2NPMessage message) {}
}

private static class FloodLookupSelector implements MessageSelector {
private RouterContext _context;
private FloodSearchJob _search;
public FloodLookupSelector(RouterContext ctx, FloodSearchJob search) {
_context = ctx;
_search = search;
}
public boolean continueMatching() { return _search.getLookupsRemaining() > 0; }
public long getExpiration() { return _search.getExpiration(); }
public boolean isMatch(I2NPMessage message) {
if (message == null) return false;
if (message instanceof DatabaseStoreMessage) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
// is it worth making sure the reply came in on the right tunnel?
if (_search.getKey().equals(dsm.getKey())) {
_search.decrementRemaining();
return true;
}
} else if (message instanceof DatabaseSearchReplyMessage) {
DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
if (_search.getKey().equals(dsrm.getSearchKey())) {
_search.decrementRemaining();
return true;
}
}
return false;
}
}
}

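A usage sketch, not taken from this commit: how a caller such as the netDb facade is expected to drive the new job. The 20-second timeout and the onFind/onFailed callback jobs are placeholders, not values from the diff.

    // Kick off phase 1 (floodfill) of a netDb lookup; if it fails or times out,
    // FloodSearchJob falls back to _facade.searchFull() on its own.
    FloodSearchJob search = new FloodSearchJob(ctx, facade, key, onFind, onFailed, 20*1000, isLease);
    ctx.jobQueue().addJob(search);
    // A second caller asking for the same key while the search is in flight
    // attaches its callbacks instead of starting another search:
    search.addDeferred(otherOnFind, otherOnFailed, 20*1000, isLease);
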
@ -351,210 +351,3 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
}
}
}

/**
* Try sending a search to some floodfill peers, but if we don't get a successful
* match within half the allowed lookup time, give up and start querying through
* the normal (kademlia) channels. This should cut down on spurious lookups caused
* by simple delays in responses from floodfill peers
*
*/
class FloodSearchJob extends JobImpl {
private Log _log;
private FloodfillNetworkDatabaseFacade _facade;
private Hash _key;
private final List _onFind;
private final List _onFailed;
private long _expiration;
private int _timeoutMs;
private long _origExpiration;
private boolean _isLease;
private volatile int _lookupsRemaining;
private volatile boolean _dead;
public FloodSearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
super(ctx);
_log = ctx.logManager().getLog(FloodSearchJob.class);
_facade = facade;
_key = key;
_onFind = new ArrayList();
_onFind.add(onFind);
_onFailed = new ArrayList();
_onFailed.add(onFailed);
int timeout = -1;
timeout = timeoutMs / FLOOD_SEARCH_TIME_FACTOR;
if (timeout < timeoutMs)
timeout = timeoutMs;
_timeoutMs = timeout;
_expiration = timeout + ctx.clock().now();
_origExpiration = timeoutMs + ctx.clock().now();
_isLease = isLease;
_lookupsRemaining = 0;
_dead = false;
}
void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
if (_dead) {
getContext().jobQueue().addJob(onFailed);
} else {
if (onFind != null) synchronized (_onFind) { _onFind.add(onFind); }
if (onFailed != null) synchronized (_onFailed) { _onFailed.add(onFailed); }
}
}
public long getExpiration() { return _expiration; }
private static final int CONCURRENT_SEARCHES = 2;
private static final int FLOOD_SEARCH_TIME_FACTOR = 2;
private static final int FLOOD_SEARCH_TIME_MIN = 30*1000;
public void runJob() {
// pick some floodfill peers and send out the searches
List floodfillPeers = _facade.getFloodfillPeers();
FloodLookupSelector replySelector = new FloodLookupSelector(getContext(), this);
ReplyJob onReply = new FloodLookupMatchJob(getContext(), this);
Job onTimeout = new FloodLookupTimeoutJob(getContext(), this);
OutNetMessage out = getContext().messageRegistry().registerPending(replySelector, onReply, onTimeout, _timeoutMs);

for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
Hash peer = (Hash)floodfillPeers.get(i);
if (peer.equals(getContext().routerHash()))
continue;

DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel();
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if ( (replyTunnel == null) || (outTunnel == null) ) {
_dead = true;
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
getContext().messageRegistry().unregisterPending(out);
return;
}
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setMessageExpiration(getContext().clock().now()+10*1000);
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
dlm.setSearchKey(_key);

if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " to " + peer.toBase64());
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), peer);
_lookupsRemaining++;
}

if (_lookupsRemaining <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " had no peers to send to");
// no floodfill peers, go to the normal ones
getContext().messageRegistry().unregisterPending(out);
_facade.searchFull(_key, _onFind, _onFailed, _timeoutMs*FLOOD_SEARCH_TIME_FACTOR, _isLease);
}
}
public String getName() { return "NetDb search (phase 1)"; }

Hash getKey() { return _key; }
void decrementRemaining() { _lookupsRemaining--; }
int getLookupsRemaining() { return _lookupsRemaining; }

void failed() {
if (_dead) return;
_dead = true;
int timeRemaining = (int)(_origExpiration - getContext().clock().now());
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " failed with " + timeRemaining);
if (timeRemaining > 0) {
_facade.searchFull(_key, _onFind, _onFailed, timeRemaining, _isLease);
} else {
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}
void success() {
if (_dead) return;
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " successful");
_dead = true;
_facade.complete(_key);
List removed = null;
synchronized (_onFind) {
removed = new ArrayList(_onFind);
_onFind.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}

class FloodLookupTimeoutJob extends JobImpl {
private FloodSearchJob _search;
public FloodLookupTimeoutJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_search = job;
}
public void runJob() {
_search.decrementRemaining();
if (_search.getLookupsRemaining() <= 0)
_search.failed();
}
public String getName() { return "NetDb search (phase 1) timeout"; }
}

class FloodLookupMatchJob extends JobImpl implements ReplyJob {
private Log _log;
private FloodSearchJob _search;
public FloodLookupMatchJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_log = ctx.logManager().getLog(FloodLookupMatchJob.class);
_search = job;
}
public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
_search.success();
} else {
int remaining = _search.getLookupsRemaining();
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + "/" + _search.getJobId() + ": got a reply looking for "
+ _search.getKey().toBase64() + ", with " + remaining + " outstanding searches");
// netDb reply pointing us at other people
if (remaining <= 0)
_search.failed();
}
}
public String getName() { return "NetDb search (phase 1) match"; }
public void setMessage(I2NPMessage message) {}
}

class FloodLookupSelector implements MessageSelector {
private RouterContext _context;
private FloodSearchJob _search;
public FloodLookupSelector(RouterContext ctx, FloodSearchJob search) {
_context = ctx;
_search = search;
}
public boolean continueMatching() { return _search.getLookupsRemaining() > 0; }
public long getExpiration() { return _search.getExpiration(); }
public boolean isMatch(I2NPMessage message) {
if (message == null) return false;
if (message instanceof DatabaseStoreMessage) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
// is it worth making sure the reply came in on the right tunnel?
if (_search.getKey().equals(dsm.getKey())) {
_search.decrementRemaining();
return true;
}
} else if (message instanceof DatabaseSearchReplyMessage) {
DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
if (_search.getKey().equals(dsrm.getSearchKey())) {
_search.decrementRemaining();
return true;
}
}
return false;
}
}

@ -1003,7 +1003,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
StringBuilder buf = new StringBuilder(size);
out.write("<h2>Network Database Contents (<a href=\"netdb.jsp?l=1\">View LeaseSets</a>)</h2>\n");
if (!_initialized) {
buf.append("<i>Not initialized</i>\n");
buf.append("Not initialized\n");
out.write(buf.toString());
out.flush();
return;
@ -1052,8 +1052,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
buf.append("<tr><th>Version</th><th>Count</th></tr>\n");
for (String routerVersion : versionList) {
int num = versions.count(routerVersion);
buf.append("<tr><td>").append(DataHelper.stripHTML(routerVersion));
buf.append("</td><td align=\"right\">").append(num).append("</td></tr>\n");
buf.append("<tr><td align=\"center\">").append(DataHelper.stripHTML(routerVersion));
buf.append("</td><td align=\"center\">").append(num).append("</td></tr>\n");
}
buf.append("</table>\n");
}
@ -1071,7 +1071,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
buf.append("<tr><td><img height=\"11\" width=\"16\" alt=\"").append(country.toUpperCase()).append("\"");
buf.append(" src=\"/flags.jsp?c=").append(country).append("\"> ");
buf.append(_context.commSystem().getCountryName(country));
buf.append("</td><td align=\"right\">").append(num).append("</td></tr>\n");
buf.append("</td><td align=\"center\">").append(num).append("</td></tr>\n");
}
buf.append("</table>\n");
}
@ -1086,21 +1086,26 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
*/
private void renderRouterInfo(StringBuilder buf, RouterInfo info, boolean isUs, boolean full) {
String hash = info.getIdentity().getHash().toBase64();
buf.append("<a name=\"").append(hash.substring(0, 6)).append("\" ></a>");
buf.append("<table><tr><th><a name=\"").append(hash.substring(0, 6)).append("\" ></a>");
if (isUs) {
buf.append("<a name=\"our-info\" ></a><b>Our info: ").append(hash).append("</b><br>\n");
buf.append("<a name=\"our-info\" ></a><b>Our info: ").append(hash).append("</b></th></tr><tr><td>\n");
} else {
buf.append("<b>Peer info for:</b> ").append(hash).append("<br>\n");
buf.append("<b>Peer info for:</b> ").append(hash).append("\n");
if (full) {
buf.append("[<a href=\"netdb.jsp\" >Back</a>]</th></tr><td>\n");
} else {
buf.append("[<a href=\"netdb.jsp?r=").append(hash.substring(0, 6)).append("\" >Full entry</a>]</th></tr><td>\n");
}
}

long age = _context.clock().now() - info.getPublished();
if (isUs && _context.router().isHidden())
buf.append("Hidden, Updated: <i>").append(DataHelper.formatDuration(age)).append(" ago</i><br>\n");
buf.append("<b>Hidden, Updated:</b> ").append(DataHelper.formatDuration(age)).append(" ago<br>\n");
else if (age > 0)
buf.append("Published: <i>").append(DataHelper.formatDuration(age)).append(" ago</i><br>\n");
buf.append("<b>Published:</b> ").append(DataHelper.formatDuration(age)).append(" ago<br>\n");
else
buf.append("Published: <i>in ").append(DataHelper.formatDuration(0-age)).append("???</i><br>\n");
buf.append("Address(es): <i>");
buf.append("<b>Published:</b> in ").append(DataHelper.formatDuration(0-age)).append("???<br>\n");
buf.append("<b>Address(es):</b> ");
String country = _context.commSystem().getCountry(info.getIdentity().getHash());
if(country != null) {
buf.append("<img height=\"11\" width=\"16\" alt=\"").append(country.toUpperCase()).append("\"");
@ -1115,19 +1120,18 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
buf.append('[').append(DataHelper.stripHTML(name)).append('=').append(DataHelper.stripHTML(val)).append("] ");
}
}
buf.append("</i><br>\n");
buf.append("</td></tr>\n");
if (full) {
buf.append("Stats: <br><i><code>\n");
buf.append("<tr><td>Stats: <br><code>\n");
for (Iterator iter = info.getOptions().keySet().iterator(); iter.hasNext(); ) {
String key = (String)iter.next();
String val = info.getOption(key);
buf.append(DataHelper.stripHTML(key)).append(" = ").append(DataHelper.stripHTML(val)).append("<br>\n");
}
buf.append("</code></i>\n");
buf.append("</code></td></tr>\n");
} else {
buf.append("<a href=\"netdb.jsp?r=").append(hash.substring(0, 6)).append("\" >Full entry</a>\n");
}
buf.append("<hr>\n");
buf.append("</td></tr>\n");
}

}

@ -3,6 +3,7 @@ package net.i2p.router.tunnel.pool;
import java.util.HashSet;
import java.util.Set;

import net.i2p.crypto.SessionKeyManager;
import net.i2p.data.Certificate;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
@ -28,6 +29,8 @@ class TestJob extends JobImpl {
private TunnelInfo _outTunnel;
private TunnelInfo _replyTunnel;
private PooledTunnelCreatorConfig _otherTunnel;
/** save this so we can tell the SKM to kill it if the test fails */
private SessionTag _encryptTag;

/** base to randomize the test delay on */
private static final int TEST_DELAY = 30*1000;
@ -129,6 +132,7 @@ class TestJob extends JobImpl {

SessionKey encryptKey = getContext().keyGenerator().generateSessionKey();
SessionTag encryptTag = new SessionTag(true);
_encryptTag = encryptTag;
SessionKey sentKey = new SessionKey();
Set sentTags = null;
GarlicMessage msg = GarlicMessageBuilder.buildMessage(getContext(), payload, sentKey, sentTags,
@ -142,7 +146,14 @@ class TestJob extends JobImpl {
}
Set encryptTags = new HashSet(1);
encryptTags.add(encryptTag);
// Register the single tag with the appropriate SKM
if (_cfg.isInbound() && !_pool.getSettings().isExploratory()) {
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_pool.getSettings().getDestination());
if (skm != null)
skm.tagsReceived(encryptKey, encryptTags);
} else {
getContext().sessionKeyManager().tagsReceived(encryptKey, encryptTags);
}

if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending garlic test of " + _outTunnel + " / " + _replyTunnel);
@ -307,9 +318,18 @@ class TestJob extends JobImpl {
public void runJob() {
if (_log.shouldLog(Log.WARN))
_log.warn("Timeout: found? " + _found, getAddedBy());
if (!_found)
if (!_found) {
// don't clog up the SKM with old one-tag tagsets
if (_cfg.isInbound() && !_pool.getSettings().isExploratory()) {
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_pool.getSettings().getDestination());
if (skm != null)
skm.consumeTag(_encryptTag);
} else {
getContext().sessionKeyManager().consumeTag(_encryptTag);
}
testFailed(getContext().clock().now() - _started);
}
}

@Override
public String toString() {

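For reference only, not in the commit: the selection rule the two TestJob hunks apply, written once as a hypothetical helper; the calls are the ones used above.

    // Client tunnels are tested against the owning client's SessionKeyManager,
    // exploratory tunnels against the router-wide one; the client SKM may be
    // null if the client has since disconnected.
    private SessionKeyManager chooseSKM() {
        if (_cfg.isInbound() && !_pool.getSettings().isExploratory())
            return getContext().clientManager().getClientSessionKeyManager(_pool.getSettings().getDestination());
        return getContext().sessionKeyManager();
    }
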
@ -309,7 +309,8 @@ public class TunnelPoolManager implements TunnelManagerFacade {
void buildComplete(PooledTunnelCreatorConfig cfg) {
//buildComplete();
if (cfg.getLength() > 1 &&
!_context.router().gracefulShutdownInProgress()) {
(!_context.router().gracefulShutdownInProgress()) &&
!Boolean.valueOf(_context.getProperty("router.disableTunnelTesting")).booleanValue()) {
TunnelPool pool = cfg.getTunnelPool();
if (pool == null) {
// never seen this before, do we reallly need to bother