propagate from branch 'i2p.i2p.zzz.test3' (head 459a56e53d8d694591071574e87474b5e95d6494)

to branch 'i2p.i2p' (head ffa1aab7aa8e75d75c183fd6f76140f7d840a6ae)
zzz
2009-10-01 18:18:23 +00:00
61 changed files with 1446 additions and 788 deletions

View File

@ -85,13 +85,13 @@ public abstract class ClientManagerFacade implements Service {
*
* @return set of Destination objects
*/
public Set listClients() { return Collections.EMPTY_SET; }
public Set<Destination> listClients() { return Collections.EMPTY_SET; }
/**
* Return the client's current config, or null if not connected
*
*/
public abstract SessionConfig getClientSessionConfig(Destination dest);
public abstract SessionKeyManager getClientSessionKeyManager(Destination dest);
public abstract SessionKeyManager getClientSessionKeyManager(Hash dest);
public void renderStatusHTML(Writer out) throws IOException { }
}
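
For reference, a minimal caller-side sketch of the changed API: listClients() is now generic, and the per-client SessionKeyManager is looked up by Hash rather than by Destination (hypothetical variable names; assumes a RouterContext ctx in scope):

    Set<Destination> clients = ctx.clientManager().listClients();   // no cast needed anymore
    for (Destination d : clients) {
        // the per-client session key manager is now keyed by the destination's hash
        SessionKeyManager skm = ctx.clientManager().getClientSessionKeyManager(d.calculateHash());
    }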

View File

@ -41,7 +41,7 @@ public class DummyClientManagerFacade extends ClientManagerFacade {
public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) {}
public SessionConfig getClientSessionConfig(Destination _dest) { return null; }
public SessionKeyManager getClientSessionKeyManager(Destination _dest) { return null; }
public SessionKeyManager getClientSessionKeyManager(Hash _dest) { return null; }
public void requestLeaseSet(Hash dest, LeaseSet set) {}

View File

@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 16;
public final static long BUILD = 20;
/** for example "-test" */
public final static String EXTRA = "";
public final static String FULL_VERSION = VERSION + "-" + BUILD + EXTRA;

View File

@ -18,6 +18,7 @@ import java.util.Map;
import java.util.Set;
import net.i2p.crypto.SessionKeyManager;
import net.i2p.crypto.TransientSessionKeyManager;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
@ -188,11 +189,11 @@ public class ClientConnectionRunner {
if (_log.shouldLog(Log.DEBUG))
_log.debug("SessionEstablished called for destination " + _destHashCache.toBase64());
_config = config;
// per-dest unimplemented
//if (_sessionKeyManager == null)
// _sessionKeyManager = new TransientSessionKeyManager(_context);
//else
// _log.error("SessionEstablished called for twice for destination " + _destHashCache.toBase64().substring(0,4));
// per-destination session key manager to prevent rather easy correlation
if (_sessionKeyManager == null)
_sessionKeyManager = new TransientSessionKeyManager(_context);
else
_log.error("SessionEstablished called for twice for destination " + _destHashCache.toBase64().substring(0,4));
_manager.destinationEstablished(this);
}
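
With the per-destination manager now actually instantiated, code that encrypts or decrypts on behalf of a client should fetch that client's manager rather than the router-wide one. A minimal sketch, assuming a client Hash destHash:

    SessionKeyManager skm = ctx.clientManager().getClientSessionKeyManager(destHash);
    if (skm == null) {
        // client not connected: abort rather than falling back to ctx.sessionKeyManager(),
        // which would reintroduce the cross-destination correlation this change prevents
    }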

View File

@ -42,8 +42,8 @@ import net.i2p.util.Log;
public class ClientManager {
private Log _log;
private ClientListenerRunner _listener;
private final HashMap _runners; // Destination --> ClientConnectionRunner
private final Set _pendingRunners; // ClientConnectionRunner for clients w/out a Dest yet
private final HashMap<Destination, ClientConnectionRunner> _runners; // Destination --> ClientConnectionRunner
private final Set<ClientConnectionRunner> _pendingRunners; // ClientConnectionRunner for clients w/out a Dest yet
private RouterContext _ctx;
/** ms to wait before rechecking for inbound messages to deliver to clients */
@ -90,21 +90,21 @@ public class ClientManager {
public void shutdown() {
_log.info("Shutting down the ClientManager");
_listener.stopListening();
Set runners = new HashSet();
Set<ClientConnectionRunner> runners = new HashSet();
synchronized (_runners) {
for (Iterator iter = _runners.values().iterator(); iter.hasNext();) {
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
for (Iterator<ClientConnectionRunner> iter = _runners.values().iterator(); iter.hasNext();) {
ClientConnectionRunner runner = iter.next();
runners.add(runner);
}
}
synchronized (_pendingRunners) {
for (Iterator iter = _pendingRunners.iterator(); iter.hasNext();) {
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
for (Iterator<ClientConnectionRunner> iter = _pendingRunners.iterator(); iter.hasNext();) {
ClientConnectionRunner runner = iter.next();
runners.add(runner);
}
}
for (Iterator iter = runners.iterator(); iter.hasNext(); ) {
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
for (Iterator<ClientConnectionRunner> iter = runners.iterator(); iter.hasNext(); ) {
ClientConnectionRunner runner = iter.next();
runner.stopRunning();
}
}
@ -131,15 +131,26 @@ public class ClientManager {
}
}
/**
* Add to the clients list. Check for a dup destination.
*/
public void destinationEstablished(ClientConnectionRunner runner) {
Destination dest = runner.getConfig().getDestination();
if (_log.shouldLog(Log.DEBUG))
_log.debug("DestinationEstablished called for destination " + runner.getConfig().getDestination().calculateHash().toBase64());
_log.debug("DestinationEstablished called for destination " + dest.calculateHash().toBase64());
synchronized (_pendingRunners) {
_pendingRunners.remove(runner);
}
boolean fail = false;
synchronized (_runners) {
_runners.put(runner.getConfig().getDestination(), runner);
fail = _runners.containsKey(dest);
if (!fail)
_runners.put(dest, runner);
}
if (fail) {
_log.log(Log.CRIT, "Client attempted to register duplicate destination " + dest.calculateHash().toBase64());
runner.disconnectClient("Duplicate destination");
}
}
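
Design note on the hunk above: the containsKey()/put() pair runs inside a single synchronized block, so the check and the insert are atomic, while the CRIT log and the disconnect happen only after the lock is released. On a ConcurrentHashMap the same check-and-insert could collapse to one call (a hypothetical alternative, not what this code uses):

    boolean fail = _runners.putIfAbsent(dest, runner) != null;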
@ -278,8 +289,8 @@ public class ClientManager {
return true;
}
public Set listClients() {
Set rv = new HashSet();
public Set<Destination> listClients() {
Set<Destination> rv = new HashSet();
synchronized (_runners) {
rv.addAll(_runners.keySet());
}
@ -293,7 +304,7 @@ public class ClientManager {
long inLock = 0;
synchronized (_runners) {
inLock = _ctx.clock().now();
rv = (ClientConnectionRunner)_runners.get(dest);
rv = _runners.get(dest);
}
long afterLock = _ctx.clock().now();
if (afterLock - beforeLock > 50) {
@ -317,9 +328,10 @@ public class ClientManager {
/**
* Return the client's SessionKeyManager
*
* Use this instead of the RouterContext.sessionKeyManager()
* to prevent correlation attacks across destinations
*/
public SessionKeyManager getClientSessionKeyManager(Destination dest) {
public SessionKeyManager getClientSessionKeyManager(Hash dest) {
ClientConnectionRunner runner = getRunner(dest);
if (runner != null)
return runner.getSessionKeyManager();
@ -331,8 +343,8 @@ public class ClientManager {
if (destHash == null)
return null;
synchronized (_runners) {
for (Iterator iter = _runners.values().iterator(); iter.hasNext(); ) {
ClientConnectionRunner cur = (ClientConnectionRunner)iter.next();
for (Iterator<ClientConnectionRunner> iter = _runners.values().iterator(); iter.hasNext(); ) {
ClientConnectionRunner cur = iter.next();
if (cur.getDestHash().equals(destHash))
return cur;
}
@ -354,8 +366,8 @@ public class ClientManager {
}
}
Set getRunnerDestinations() {
Set dests = new HashSet();
Set<Destination> getRunnerDestinations() {
Set<Destination> dests = new HashSet();
long beforeLock = _ctx.clock().now();
long inLock = 0;
synchronized (_runners) {
@ -390,13 +402,13 @@ public class ClientManager {
StringBuilder buf = new StringBuilder(8*1024);
buf.append("<u><b>Local destinations</b></u><br>");
Map runners = null;
Map<Destination, ClientConnectionRunner> runners = null;
synchronized (_runners) {
runners = (Map)_runners.clone();
}
for (Iterator iter = runners.keySet().iterator(); iter.hasNext(); ) {
Destination dest = (Destination)iter.next();
ClientConnectionRunner runner = (ClientConnectionRunner)runners.get(dest);
for (Iterator<Destination> iter = runners.keySet().iterator(); iter.hasNext(); ) {
Destination dest = iter.next();
ClientConnectionRunner runner = runners.get(dest);
buf.append("<b>*</b> ").append(dest.calculateHash().toBase64().substring(0,6)).append("<br>\n");
LeaseSet ls = runner.getLeaseSet();
if (ls == null) {

View File

@ -194,7 +194,7 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
* Return the client's current manager or null if not connected
*
*/
public SessionKeyManager getClientSessionKeyManager(Destination dest) {
public SessionKeyManager getClientSessionKeyManager(Hash dest) {
if (_manager != null)
return _manager.getClientSessionKeyManager(dest);
else {
@ -215,7 +215,7 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
* @return set of Destination objects
*/
@Override
public Set listClients() {
public Set<Destination> listClients() {
if (_manager != null)
return _manager.listClients();
else

View File

@ -17,7 +17,7 @@ import java.util.Set;
import net.i2p.crypto.SessionKeyManager;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.PublicKey;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
@ -59,14 +59,16 @@ public class GarlicMessageBuilder {
*
* So a value somewhat higher than the low threshold
* seems appropriate.
*
* Use care when adjusting these values. See ConnectionOptions in streaming,
* and TransientSessionKeyManager in crypto, for more information.
*/
private static final int DEFAULT_TAGS = 40;
private static final int LOW_THRESHOLD = 20;
private static final int LOW_THRESHOLD = 30;
public static int estimateAvailableTags(RouterContext ctx, PublicKey key, Destination local) {
// per-dest Unimplemented
//SessionKeyManager skm = ctx.clientManager().getClientSessionKeyManager(local);
SessionKeyManager skm = ctx.sessionKeyManager();
/** @param local non-null; do not use this method for the router's SessionKeyManager */
public static int estimateAvailableTags(RouterContext ctx, PublicKey key, Hash local) {
SessionKeyManager skm = ctx.clientManager().getClientSessionKeyManager(local);
if (skm == null)
return 0;
SessionKey curKey = skm.getCurrentKey(key);
@ -75,19 +77,54 @@ public class GarlicMessageBuilder {
return skm.getAvailableTags(key, curKey);
}
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config) {
return buildMessage(ctx, config, new SessionKey(), new HashSet());
/**
* Unused and probably a bad idea.
*
* Used below only on a recursive call if the garlic message contains a garlic message.
* We don't need the SessionKey or SessionTags returned.
* This uses the router's SKM, which is probably not what you want.
* This isn't fully implemented, because the key and tags aren't saved - maybe
* it should force elGamal?
*
* @param ctx scope
* @param config how/what to wrap
*/
private static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config) {
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
log.error("buildMessage 2 args, using router SKM", new Exception("who did it"));
return buildMessage(ctx, config, new SessionKey(), new HashSet(), ctx.sessionKeyManager());
}
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags) {
return buildMessage(ctx, config, wrappedKey, wrappedTags, DEFAULT_TAGS);
/**
* called by OCMJH
*
* @param ctx scope
* @param config how/what to wrap
* @param wrappedKey output parameter that will be filled with the sessionKey used
* @param wrappedTags output parameter that will be filled with the sessionTags used
*/
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set<SessionTag> wrappedTags,
SessionKeyManager skm) {
return buildMessage(ctx, config, wrappedKey, wrappedTags, DEFAULT_TAGS, false, skm);
}
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags, int numTagsToDeliver) {
/** unused */
/***
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags,
int numTagsToDeliver) {
return buildMessage(ctx, config, wrappedKey, wrappedTags, numTagsToDeliver, false);
}
***/
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags, int numTagsToDeliver, boolean forceElGamal) {
/**
* @param ctx scope
* @param config how/what to wrap
* @param wrappedKey output parameter that will be filled with the sessionKey used
* @param wrappedTags output parameter that will be filled with the sessionTags used
* @param numTagsToDeliver only if the estimated available tags are below the threshold
*/
private static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set<SessionTag> wrappedTags,
int numTagsToDeliver, boolean forceElGamal, SessionKeyManager skm) {
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
PublicKey key = config.getRecipientPublicKey();
if (key == null) {
@ -104,14 +141,14 @@ public class GarlicMessageBuilder {
if (log.shouldLog(Log.INFO))
log.info("Encrypted with public key " + key + " to expire on " + new Date(config.getExpiration()));
SessionKey curKey = ctx.sessionKeyManager().getCurrentKey(key);
SessionKey curKey = skm.getCurrentKey(key);
SessionTag curTag = null;
if (curKey == null)
curKey = ctx.sessionKeyManager().createSession(key);
curKey = skm.createSession(key);
if (!forceElGamal) {
curTag = ctx.sessionKeyManager().consumeNextAvailableTag(key, curKey);
curTag = skm.consumeNextAvailableTag(key, curKey);
int availTags = ctx.sessionKeyManager().getAvailableTags(key, curKey);
int availTags = skm.getAvailableTags(key, curKey);
if (log.shouldLog(Log.DEBUG))
log.debug("Available tags for encryption to " + key + ": " + availTags);
@ -120,7 +157,7 @@ public class GarlicMessageBuilder {
wrappedTags.add(new SessionTag(true));
if (log.shouldLog(Log.INFO))
log.info("Too few are available (" + availTags + "), so we're including more");
} else if (ctx.sessionKeyManager().getAvailableTimeLeft(key, curKey) < 60*1000) {
} else if (skm.getAvailableTimeLeft(key, curKey) < 60*1000) {
// if we have enough tags, but they expire in under a minute, we want more
for (int i = 0; i < numTagsToDeliver; i++)
wrappedTags.add(new SessionTag(true));
@ -138,16 +175,19 @@ public class GarlicMessageBuilder {
}
/**
* used by TestJob and directly above
*
* @param ctx scope
* @param config how/what to wrap
* @param wrappedKey output parameter that will be filled with the sessionKey used
* @param wrappedKey unused - why??
* @param wrappedTags output parameter that will be filled with the sessionTags used
* @param target public key of the location being garlic routed to (may be null if we
* know the encryptKey and encryptTag)
* @param encryptKey sessionKey used to encrypt the current message
* @param encryptTag sessionTag used to encrypt the current message
*/
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags, PublicKey target, SessionKey encryptKey, SessionTag encryptTag) {
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set<SessionTag> wrappedTags,
PublicKey target, SessionKey encryptKey, SessionTag encryptTag) {
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
if (config == null)
throw new IllegalArgumentException("Null config specified");
@ -209,6 +249,7 @@ public class GarlicMessageBuilder {
cloves[i] = buildClove(ctx, (PayloadGarlicConfig)c);
} else {
log.debug("Subclove IS NOT a payload garlic clove");
// See notes below
cloves[i] = buildClove(ctx, c);
}
if (cloves[i] == null)
@ -242,6 +283,22 @@ public class GarlicMessageBuilder {
return buildCommonClove(ctx, clove, config);
}
/**
* UNUSED
*
* The Garlic Message we are building contains another garlic message,
* as specified by a GarlicConfig (NOT a PayloadGarlicConfig).
*
* So this calls back to the top, to buildMessage(ctx, config),
* which uses the router's SKM, i.e. the wrong one.
* Unfortunately we've lost the reference to the SessionKeyManager way down here,
* so we can't call buildMessage(ctx, config, key, tags, skm).
*
* If we do ever end up constructing a garlic message that contains a garlic message,
* we'll have to fix this by passing the skm through the last buildMessage,
* through buildCloveSet, to here.
*
*/
private static byte[] buildClove(RouterContext ctx, GarlicConfig config) throws DataFormatException, IOException {
GarlicClove clove = new GarlicClove(ctx);
GarlicMessage msg = buildMessage(ctx, config);
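
A sketch of the fix proposed in the comment above — threading the SessionKeyManager down so the recursive case no longer falls back to the router's SKM (hypothetical signature, not part of this commit):

    private static byte[] buildClove(RouterContext ctx, GarlicConfig config, SessionKeyManager skm)
            throws DataFormatException, IOException {
        GarlicClove clove = new GarlicClove(ctx);
        // recurse with the caller's SKM instead of the router-wide one
        GarlicMessage msg = buildMessage(ctx, config, new SessionKey(),
                                         new HashSet<SessionTag>(), skm);
        clove.setData(msg);   // remainder as in the existing method
        return buildCommonClove(ctx, clove, config);
    }

    // buildCloveSet and the final buildMessage would gain a matching skm parameter
    // and pass it through to here.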

View File

@ -10,6 +10,7 @@ package net.i2p.router.message;
import java.util.Date;
import net.i2p.crypto.SessionKeyManager;
import net.i2p.data.Certificate;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
@ -32,13 +33,14 @@ public class GarlicMessageParser {
_log = _context.logManager().getLog(GarlicMessageParser.class);
}
public CloveSet getGarlicCloves(GarlicMessage message, PrivateKey encryptionKey) {
/** @param skm use tags from this session key manager */
public CloveSet getGarlicCloves(GarlicMessage message, PrivateKey encryptionKey, SessionKeyManager skm) {
byte encData[] = message.getData();
byte decrData[] = null;
try {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Decrypting with private key " + encryptionKey);
decrData = _context.elGamalAESEngine().decrypt(encData, encryptionKey);
decrData = _context.elGamalAESEngine().decrypt(encData, encryptionKey, skm);
} catch (DataFormatException dfe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error decrypting", dfe);

View File

@ -8,6 +8,7 @@ package net.i2p.router.message;
*
*/
import net.i2p.crypto.SessionKeyManager;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.PrivateKey;
@ -47,13 +48,16 @@ public class GarlicMessageReceiver {
_clientDestination = clientDestination;
_parser = new GarlicMessageParser(context);
_receiver = receiver;
//_log.error("New GMR dest = " + clientDestination);
}
public void receive(GarlicMessage message) {
PrivateKey decryptionKey = null;
SessionKeyManager skm = null;
if (_clientDestination != null) {
LeaseSetKeys keys = _context.keyManager().getKeys(_clientDestination);
if (keys != null) {
skm = _context.clientManager().getClientSessionKeyManager(_clientDestination);
if (keys != null && skm != null) {
decryptionKey = keys.getDecryptionKey();
} else {
if (_log.shouldLog(Log.WARN))
@ -62,9 +66,10 @@ public class GarlicMessageReceiver {
}
} else {
decryptionKey = _context.keyManager().getPrivateKey();
skm = _context.sessionKeyManager();
}
CloveSet set = _parser.getGarlicCloves(message, decryptionKey);
CloveSet set = _parser.getGarlicCloves(message, decryptionKey, skm);
if (set != null) {
for (int i = 0; i < set.getCloveCount(); i++) {
GarlicClove clove = set.getClove(i);

View File

@ -31,14 +31,18 @@ import net.i2p.util.Log;
public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageReceiver.CloveReceiver {
private Log _log;
private GarlicMessage _message;
private RouterIdentity _from;
private Hash _fromHash;
private Map _cloves; // map of clove Id --> Expiration of cloves we've already seen
//private RouterIdentity _from;
//private Hash _fromHash;
//private Map _cloves; // map of clove Id --> Expiration of cloves we've already seen
//private MessageHandler _handler;
private GarlicMessageParser _parser;
//private GarlicMessageParser _parser;
private final static int FORWARD_PRIORITY = 50;
/**
* @param from ignored
* @param fromHash ignored
*/
public HandleGarlicMessageJob(RouterContext context, GarlicMessage msg, RouterIdentity from, Hash fromHash) {
super(context);
_log = context.logManager().getLog(HandleGarlicMessageJob.class);
@ -46,11 +50,11 @@ public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageRece
if (_log.shouldLog(Log.DEBUG))
_log.debug("New handle garlicMessageJob called w/ message from [" + from + "]", new Exception("Debug"));
_message = msg;
_from = from;
_fromHash = fromHash;
_cloves = new HashMap();
//_from = from;
//_fromHash = fromHash;
//_cloves = new HashMap();
//_handler = new MessageHandler(context);
_parser = new GarlicMessageParser(context);
//_parser = new GarlicMessageParser(context);
}
public String getName() { return "Handle Inbound Garlic Message"; }

View File

@ -17,6 +17,7 @@ import net.i2p.data.LeaseSet;
import net.i2p.data.Payload;
import net.i2p.data.PublicKey;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
@ -46,13 +47,15 @@ class OutboundClientMessageJobHelper {
*
* For now, it's just a tunneled DeliveryStatusMessage
*
* Unused?
*
* @param bundledReplyLeaseSet if specified, the given LeaseSet will be packaged with the message (allowing
* much faster replies, since their netDb search will return almost instantly)
* @return garlic, or null if no tunnels were found (or other errors)
*/
static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK,
Payload data, Hash from, Destination dest, TunnelInfo replyTunnel,
SessionKey wrappedKey, Set wrappedTags,
SessionKey wrappedKey, Set<SessionTag> wrappedTags,
boolean requireAck, LeaseSet bundledReplyLeaseSet) {
PayloadGarlicConfig dataClove = buildDataClove(ctx, data, dest, expiration);
return createGarlicMessage(ctx, replyToken, expiration, recipientPK, dataClove, from, dest, replyTunnel, wrappedKey,
@ -62,15 +65,18 @@ class OutboundClientMessageJobHelper {
* Allow the app to specify the data clove directly, which enables OutboundClientMessage to resend the
* same payload (including expiration and unique id) in different garlics (down different tunnels)
*
* This is called from OCMOSJ
*
* @return garlic, or null if no tunnels were found (or other errors)
*/
static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK,
PayloadGarlicConfig dataClove, Hash from, Destination dest, TunnelInfo replyTunnel, SessionKey wrappedKey,
Set wrappedTags, boolean requireAck, LeaseSet bundledReplyLeaseSet) {
Set<SessionTag> wrappedTags, boolean requireAck, LeaseSet bundledReplyLeaseSet) {
GarlicConfig config = createGarlicConfig(ctx, replyToken, expiration, recipientPK, dataClove, from, dest, replyTunnel, requireAck, bundledReplyLeaseSet);
if (config == null)
return null;
GarlicMessage msg = GarlicMessageBuilder.buildMessage(ctx, config, wrappedKey, wrappedTags);
GarlicMessage msg = GarlicMessageBuilder.buildMessage(ctx, config, wrappedKey, wrappedTags,
ctx.clientManager().getClientSessionKeyManager(from));
return msg;
}

View File

@ -10,6 +10,8 @@ import java.util.Map;
import java.util.Properties;
import java.util.Set;
import net.i2p.crypto.SessionKeyManager;
import net.i2p.crypto.TagSetHandle;
import net.i2p.data.Base64;
import net.i2p.data.Certificate;
import net.i2p.data.Destination;
@ -20,6 +22,7 @@ import net.i2p.data.Payload;
import net.i2p.data.PublicKey;
import net.i2p.data.RouterInfo;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
import net.i2p.data.i2cp.MessageId;
import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.DeliveryInstructions;
@ -471,7 +474,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
return;
}
int existingTags = GarlicMessageBuilder.estimateAvailableTags(getContext(), _leaseSet.getEncryptionKey(), _from);
int existingTags = GarlicMessageBuilder.estimateAvailableTags(getContext(), _leaseSet.getEncryptionKey(),
_from.calculateHash());
_outTunnel = selectOutboundTunnel(_to);
// boolean wantACK = _wantACK || existingTags <= 30 || getContext().random().nextInt(100) < 5;
// what's the point of 5% random? possible improvements or replacements:
@ -489,7 +493,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
PublicKey key = _leaseSet.getEncryptionKey();
SessionKey sessKey = new SessionKey();
Set tags = new HashSet();
Set<SessionTag> tags = new HashSet();
// If we want an ack, bundle a leaseSet... (so he can get back to us)
LeaseSet replyLeaseSet = getReplyLeaseSet(wantACK);
// ... and vice versa (so we know he got it)
@ -531,8 +535,16 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
SendTimeoutJob onFail = null;
ReplySelector selector = null;
if (wantACK) {
onReply = new SendSuccessJob(getContext(), sessKey, tags);
onFail = new SendTimeoutJob(getContext());
TagSetHandle tsh = null;
if ( (sessKey != null) && (tags != null) && (tags.size() > 0) ) {
if (_leaseSet != null) {
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_from.calculateHash());
if (skm != null)
tsh = skm.tagsDelivered(_leaseSet.getEncryptionKey(), sessKey, tags);
}
}
onReply = new SendSuccessJob(getContext(), sessKey, tsh);
onFail = new SendTimeoutJob(getContext(), sessKey, tsh);
selector = new ReplySelector(token);
}
@ -550,9 +562,9 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
+ _lease.getGateway().toBase64());
DispatchJob dispatchJob = new DispatchJob(getContext(), msg, selector, onReply, onFail, (int)(_overallExpiration-getContext().clock().now()));
if (false) // dispatch may take 100+ms, so toss it in its own job
getContext().jobQueue().addJob(dispatchJob);
else
//if (false) // dispatch may take 100+ms, so toss it in its own job
// getContext().jobQueue().addJob(dispatchJob);
//else
dispatchJob.runJob();
} else {
if (_log.shouldLog(Log.WARN))
@ -848,6 +860,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
/** build the payload clove that will be used for all of the messages, placing the clove in the status structure */
private boolean buildClove() {
// FIXME set SKM
PayloadGarlicConfig clove = new PayloadGarlicConfig();
DeliveryInstructions instructions = new DeliveryInstructions();
@ -932,14 +945,14 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
*/
private class SendSuccessJob extends JobImpl implements ReplyJob {
private SessionKey _key;
private Set _tags;
private TagSetHandle _tags;
/**
* Create a new success job that will be fired when the message encrypted with
* the given session key and bearing the specified tags is confirmed delivered.
*
*/
public SendSuccessJob(RouterContext enclosingContext, SessionKey key, Set tags) {
public SendSuccessJob(RouterContext enclosingContext, SessionKey key, TagSetHandle tags) {
super(enclosingContext);
_key = key;
_tags = tags;
@ -955,10 +968,10 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
+ ": SUCCESS! msg " + _clientMessageId
+ " sent after " + sendTime + "ms");
if ( (_key != null) && (_tags != null) && (_tags.size() > 0) ) {
if (_leaseSet != null)
getContext().sessionKeyManager().tagsDelivered(_leaseSet.getEncryptionKey(),
_key, _tags);
if (_key != null && _tags != null && _leaseSet != null) {
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_from.calculateHash());
if (skm != null)
skm.tagsAcked(_leaseSet.getEncryptionKey(), _key, _tags);
}
long dataMsgId = _cloveId;
@ -994,8 +1007,13 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
*
*/
private class SendTimeoutJob extends JobImpl {
public SendTimeoutJob(RouterContext enclosingContext) {
private SessionKey _key;
private TagSetHandle _tags;
public SendTimeoutJob(RouterContext enclosingContext, SessionKey key, TagSetHandle tags) {
super(enclosingContext);
_key = key;
_tags = tags;
}
public String getName() { return "Send client message timed out"; }
@ -1005,6 +1023,11 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
+ ": Soft timeout through the lease " + _lease);
_lease.setNumFailure(_lease.getNumFailure()+1);
if (_key != null && _tags != null && _leaseSet != null) {
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_from.calculateHash());
if (skm != null)
skm.failTags(_leaseSet.getEncryptionKey(), _key, _tags);
}
dieFatal();
}
}
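
The tag-set lifecycle introduced in this hunk, condensed: tags are registered speculatively with the client's SessionKeyManager via tagsDelivered(), which returns a TagSetHandle; the handle is later confirmed by tagsAcked() in SendSuccessJob, or invalidated by failTags() in SendTimeoutJob (names as in the code above; encKey is _leaseSet.getEncryptionKey()):

    TagSetHandle tsh = skm.tagsDelivered(encKey, sessKey, tags);   // speculative registration
    // ... then exactly one of:
    skm.tagsAcked(encKey, sessKey, tsh);    // ack received: the far end has the tags
    skm.failTags(encKey, sessKey, tsh);     // soft timeout: drop the unconfirmed tag set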

View File

@ -0,0 +1,225 @@
package net.i2p.router.networkdb.kademlia;
import java.util.ArrayList;
import java.util.List;
import net.i2p.data.Hash;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageSelector;
import net.i2p.router.OutNetMessage;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
* Try sending a search to some floodfill peers, but if we don't get a successful
* match within half the allowed lookup time, give up and start querying through
* the normal (kademlia) channels. This should cut down on spurious lookups caused
* by simple delays in responses from floodfill peers
*
*/
public class FloodSearchJob extends JobImpl {
private Log _log;
private FloodfillNetworkDatabaseFacade _facade;
private Hash _key;
private final List _onFind;
private final List _onFailed;
private long _expiration;
private int _timeoutMs;
private long _origExpiration;
private boolean _isLease;
private volatile int _lookupsRemaining;
private volatile boolean _dead;
public FloodSearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
super(ctx);
_log = ctx.logManager().getLog(FloodSearchJob.class);
_facade = facade;
_key = key;
_onFind = new ArrayList();
_onFind.add(onFind);
_onFailed = new ArrayList();
_onFailed.add(onFailed);
int timeout = -1;
timeout = timeoutMs / FLOOD_SEARCH_TIME_FACTOR;
if (timeout < FLOOD_SEARCH_TIME_MIN)   // don't split when half the budget is under the minimum
timeout = timeoutMs;
_timeoutMs = timeout;
_expiration = timeout + ctx.clock().now();
_origExpiration = timeoutMs + ctx.clock().now();
_isLease = isLease;
_lookupsRemaining = 0;
_dead = false;
}
void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
if (_dead) {
getContext().jobQueue().addJob(onFailed);
} else {
if (onFind != null) synchronized (_onFind) { _onFind.add(onFind); }
if (onFailed != null) synchronized (_onFailed) { _onFailed.add(onFailed); }
}
}
public long getExpiration() { return _expiration; }
private static final int CONCURRENT_SEARCHES = 2;
private static final int FLOOD_SEARCH_TIME_FACTOR = 2;
private static final int FLOOD_SEARCH_TIME_MIN = 30*1000;
public void runJob() {
// pick some floodfill peers and send out the searches
List floodfillPeers = _facade.getFloodfillPeers();
FloodLookupSelector replySelector = new FloodLookupSelector(getContext(), this);
ReplyJob onReply = new FloodLookupMatchJob(getContext(), this);
Job onTimeout = new FloodLookupTimeoutJob(getContext(), this);
OutNetMessage out = getContext().messageRegistry().registerPending(replySelector, onReply, onTimeout, _timeoutMs);
for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
Hash peer = (Hash)floodfillPeers.get(i);
if (peer.equals(getContext().routerHash()))
continue;
DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel();
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if ( (replyTunnel == null) || (outTunnel == null) ) {
_dead = true;
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
getContext().messageRegistry().unregisterPending(out);
return;
}
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setMessageExpiration(getContext().clock().now()+10*1000);
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
dlm.setSearchKey(_key);
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " to " + peer.toBase64());
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), peer);
_lookupsRemaining++;
}
if (_lookupsRemaining <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " had no peers to send to");
// no floodfill peers, go to the normal ones
getContext().messageRegistry().unregisterPending(out);
_facade.searchFull(_key, _onFind, _onFailed, _timeoutMs*FLOOD_SEARCH_TIME_FACTOR, _isLease);
}
}
public String getName() { return "NetDb search (phase 1)"; }
Hash getKey() { return _key; }
void decrementRemaining() { _lookupsRemaining--; }
int getLookupsRemaining() { return _lookupsRemaining; }
void failed() {
if (_dead) return;
_dead = true;
int timeRemaining = (int)(_origExpiration - getContext().clock().now());
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " failed with " + timeRemaining);
if (timeRemaining > 0) {
_facade.searchFull(_key, _onFind, _onFailed, timeRemaining, _isLease);
} else {
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}
void success() {
if (_dead) return;
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " successful");
_dead = true;
_facade.complete(_key);
List removed = null;
synchronized (_onFind) {
removed = new ArrayList(_onFind);
_onFind.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
private static class FloodLookupTimeoutJob extends JobImpl {
private FloodSearchJob _search;
public FloodLookupTimeoutJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_search = job;
}
public void runJob() {
_search.decrementRemaining();
if (_search.getLookupsRemaining() <= 0)
_search.failed();
}
public String getName() { return "NetDb search (phase 1) timeout"; }
}
private static class FloodLookupMatchJob extends JobImpl implements ReplyJob {
private Log _log;
private FloodSearchJob _search;
public FloodLookupMatchJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_log = ctx.logManager().getLog(FloodLookupMatchJob.class);
_search = job;
}
public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
_search.success();
} else {
int remaining = _search.getLookupsRemaining();
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + "/" + _search.getJobId() + ": got a reply looking for "
+ _search.getKey().toBase64() + ", with " + remaining + " outstanding searches");
// netDb reply pointing us at other people
if (remaining <= 0)
_search.failed();
}
}
public String getName() { return "NetDb search (phase 1) match"; }
public void setMessage(I2NPMessage message) {}
}
private static class FloodLookupSelector implements MessageSelector {
private RouterContext _context;
private FloodSearchJob _search;
public FloodLookupSelector(RouterContext ctx, FloodSearchJob search) {
_context = ctx;
_search = search;
}
public boolean continueMatching() { return _search.getLookupsRemaining() > 0; }
public long getExpiration() { return _search.getExpiration(); }
public boolean isMatch(I2NPMessage message) {
if (message == null) return false;
if (message instanceof DatabaseStoreMessage) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
// is it worth making sure the reply came in on the right tunnel?
if (_search.getKey().equals(dsm.getKey())) {
_search.decrementRemaining();
return true;
}
} else if (message instanceof DatabaseSearchReplyMessage) {
DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
if (_search.getKey().equals(dsrm.getSearchKey())) {
_search.decrementRemaining();
return true;
}
}
return false;
}
}
}
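
A worked example of the two-phase timeout split in the constructor above, assuming the FLOOD_SEARCH_TIME_MIN clamp: with timeoutMs = 90*1000, the floodfill phase gets 45*1000 ms and searchFull() later receives whatever time remains; with timeoutMs = 40*1000, half the budget (20*1000) would fall below FLOOD_SEARCH_TIME_MIN (30*1000), so the floodfill phase keeps the full 40*1000 ms and there is no split.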

View File

@ -351,210 +351,3 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
}
}
}
/**
* Try sending a search to some floodfill peers, but if we don't get a successful
* match within half the allowed lookup time, give up and start querying through
* the normal (kademlia) channels. This should cut down on spurious lookups caused
* by simple delays in responses from floodfill peers
*
*/
class FloodSearchJob extends JobImpl {
private Log _log;
private FloodfillNetworkDatabaseFacade _facade;
private Hash _key;
private final List _onFind;
private final List _onFailed;
private long _expiration;
private int _timeoutMs;
private long _origExpiration;
private boolean _isLease;
private volatile int _lookupsRemaining;
private volatile boolean _dead;
public FloodSearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
super(ctx);
_log = ctx.logManager().getLog(FloodSearchJob.class);
_facade = facade;
_key = key;
_onFind = new ArrayList();
_onFind.add(onFind);
_onFailed = new ArrayList();
_onFailed.add(onFailed);
int timeout = -1;
timeout = timeoutMs / FLOOD_SEARCH_TIME_FACTOR;
if (timeout < FLOOD_SEARCH_TIME_MIN)   // don't split when half the budget is under the minimum
timeout = timeoutMs;
_timeoutMs = timeout;
_expiration = timeout + ctx.clock().now();
_origExpiration = timeoutMs + ctx.clock().now();
_isLease = isLease;
_lookupsRemaining = 0;
_dead = false;
}
void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
if (_dead) {
getContext().jobQueue().addJob(onFailed);
} else {
if (onFind != null) synchronized (_onFind) { _onFind.add(onFind); }
if (onFailed != null) synchronized (_onFailed) { _onFailed.add(onFailed); }
}
}
public long getExpiration() { return _expiration; }
private static final int CONCURRENT_SEARCHES = 2;
private static final int FLOOD_SEARCH_TIME_FACTOR = 2;
private static final int FLOOD_SEARCH_TIME_MIN = 30*1000;
public void runJob() {
// pick some floodfill peers and send out the searches
List floodfillPeers = _facade.getFloodfillPeers();
FloodLookupSelector replySelector = new FloodLookupSelector(getContext(), this);
ReplyJob onReply = new FloodLookupMatchJob(getContext(), this);
Job onTimeout = new FloodLookupTimeoutJob(getContext(), this);
OutNetMessage out = getContext().messageRegistry().registerPending(replySelector, onReply, onTimeout, _timeoutMs);
for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
Hash peer = (Hash)floodfillPeers.get(i);
if (peer.equals(getContext().routerHash()))
continue;
DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel();
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if ( (replyTunnel == null) || (outTunnel == null) ) {
_dead = true;
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
getContext().messageRegistry().unregisterPending(out);
return;
}
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setMessageExpiration(getContext().clock().now()+10*1000);
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
dlm.setSearchKey(_key);
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " to " + peer.toBase64());
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), peer);
_lookupsRemaining++;
}
if (_lookupsRemaining <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " had no peers to send to");
// no floodfill peers, go to the normal ones
getContext().messageRegistry().unregisterPending(out);
_facade.searchFull(_key, _onFind, _onFailed, _timeoutMs*FLOOD_SEARCH_TIME_FACTOR, _isLease);
}
}
public String getName() { return "NetDb search (phase 1)"; }
Hash getKey() { return _key; }
void decrementRemaining() { _lookupsRemaining--; }
int getLookupsRemaining() { return _lookupsRemaining; }
void failed() {
if (_dead) return;
_dead = true;
int timeRemaining = (int)(_origExpiration - getContext().clock().now());
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " failed with " + timeRemaining);
if (timeRemaining > 0) {
_facade.searchFull(_key, _onFind, _onFailed, timeRemaining, _isLease);
} else {
List removed = null;
synchronized (_onFailed) {
removed = new ArrayList(_onFailed);
_onFailed.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}
void success() {
if (_dead) return;
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " successful");
_dead = true;
_facade.complete(_key);
List removed = null;
synchronized (_onFind) {
removed = new ArrayList(_onFind);
_onFind.clear();
}
while (removed.size() > 0)
getContext().jobQueue().addJob((Job)removed.remove(0));
}
}
class FloodLookupTimeoutJob extends JobImpl {
private FloodSearchJob _search;
public FloodLookupTimeoutJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_search = job;
}
public void runJob() {
_search.decrementRemaining();
if (_search.getLookupsRemaining() <= 0)
_search.failed();
}
public String getName() { return "NetDb search (phase 1) timeout"; }
}
class FloodLookupMatchJob extends JobImpl implements ReplyJob {
private Log _log;
private FloodSearchJob _search;
public FloodLookupMatchJob(RouterContext ctx, FloodSearchJob job) {
super(ctx);
_log = ctx.logManager().getLog(FloodLookupMatchJob.class);
_search = job;
}
public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
_search.success();
} else {
int remaining = _search.getLookupsRemaining();
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + "/" + _search.getJobId() + ": got a reply looking for "
+ _search.getKey().toBase64() + ", with " + remaining + " outstanding searches");
// netDb reply pointing us at other people
if (remaining <= 0)
_search.failed();
}
}
public String getName() { return "NetDb search (phase 1) match"; }
public void setMessage(I2NPMessage message) {}
}
class FloodLookupSelector implements MessageSelector {
private RouterContext _context;
private FloodSearchJob _search;
public FloodLookupSelector(RouterContext ctx, FloodSearchJob search) {
_context = ctx;
_search = search;
}
public boolean continueMatching() { return _search.getLookupsRemaining() > 0; }
public long getExpiration() { return _search.getExpiration(); }
public boolean isMatch(I2NPMessage message) {
if (message == null) return false;
if (message instanceof DatabaseStoreMessage) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
// is it worth making sure the reply came in on the right tunnel?
if (_search.getKey().equals(dsm.getKey())) {
_search.decrementRemaining();
return true;
}
} else if (message instanceof DatabaseSearchReplyMessage) {
DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
if (_search.getKey().equals(dsrm.getSearchKey())) {
_search.decrementRemaining();
return true;
}
}
return false;
}
}

View File

@ -1003,7 +1003,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
StringBuilder buf = new StringBuilder(size);
out.write("<h2>Network Database Contents (<a href=\"netdb.jsp?l=1\">View LeaseSets</a>)</h2>\n");
if (!_initialized) {
buf.append("<i>Not initialized</i>\n");
buf.append("Not initialized\n");
out.write(buf.toString());
out.flush();
return;
@ -1052,8 +1052,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
buf.append("<tr><th>Version</th><th>Count</th></tr>\n");
for (String routerVersion : versionList) {
int num = versions.count(routerVersion);
buf.append("<tr><td>").append(DataHelper.stripHTML(routerVersion));
buf.append("</td><td align=\"right\">").append(num).append("</td></tr>\n");
buf.append("<tr><td align=\"center\">").append(DataHelper.stripHTML(routerVersion));
buf.append("</td><td align=\"center\">").append(num).append("</td></tr>\n");
}
buf.append("</table>\n");
}
@ -1071,7 +1071,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
buf.append("<tr><td><img height=\"11\" width=\"16\" alt=\"").append(country.toUpperCase()).append("\"");
buf.append(" src=\"/flags.jsp?c=").append(country).append("\"> ");
buf.append(_context.commSystem().getCountryName(country));
buf.append("</td><td align=\"right\">").append(num).append("</td></tr>\n");
buf.append("</td><td align=\"center\">").append(num).append("</td></tr>\n");
}
buf.append("</table>\n");
}
@ -1086,21 +1086,26 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
*/
private void renderRouterInfo(StringBuilder buf, RouterInfo info, boolean isUs, boolean full) {
String hash = info.getIdentity().getHash().toBase64();
buf.append("<a name=\"").append(hash.substring(0, 6)).append("\" ></a>");
buf.append("<table><tr><th><a name=\"").append(hash.substring(0, 6)).append("\" ></a>");
if (isUs) {
buf.append("<a name=\"our-info\" ></a><b>Our info: ").append(hash).append("</b><br>\n");
buf.append("<a name=\"our-info\" ></a><b>Our info: ").append(hash).append("</b></th></tr><tr><td>\n");
} else {
buf.append("<b>Peer info for:</b> ").append(hash).append("<br>\n");
buf.append("<b>Peer info for:</b> ").append(hash).append("\n");
if (full) {
buf.append("[<a href=\"netdb.jsp\" >Back</a>]</th></tr><td>\n");
} else {
buf.append("[<a href=\"netdb.jsp?r=").append(hash.substring(0, 6)).append("\" >Full entry</a>]</th></tr><td>\n");
}
}
long age = _context.clock().now() - info.getPublished();
if (isUs && _context.router().isHidden())
buf.append("Hidden, Updated: <i>").append(DataHelper.formatDuration(age)).append(" ago</i><br>\n");
buf.append("<b>Hidden, Updated:</b> ").append(DataHelper.formatDuration(age)).append(" ago<br>\n");
else if (age > 0)
buf.append("Published: <i>").append(DataHelper.formatDuration(age)).append(" ago</i><br>\n");
buf.append("<b>Published:</b> ").append(DataHelper.formatDuration(age)).append(" ago<br>\n");
else
buf.append("Published: <i>in ").append(DataHelper.formatDuration(0-age)).append("???</i><br>\n");
buf.append("Address(es): <i>");
buf.append("<b>Published:</b> in ").append(DataHelper.formatDuration(0-age)).append("???<br>\n");
buf.append("<b>Address(es):</b> ");
String country = _context.commSystem().getCountry(info.getIdentity().getHash());
if(country != null) {
buf.append("<img height=\"11\" width=\"16\" alt=\"").append(country.toUpperCase()).append("\"");
@ -1115,19 +1120,18 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
buf.append('[').append(DataHelper.stripHTML(name)).append('=').append(DataHelper.stripHTML(val)).append("] ");
}
}
buf.append("</i><br>\n");
buf.append("</td></tr>\n");
if (full) {
buf.append("Stats: <br><i><code>\n");
buf.append("<tr><td>Stats: <br><code>\n");
for (Iterator iter = info.getOptions().keySet().iterator(); iter.hasNext(); ) {
String key = (String)iter.next();
String val = info.getOption(key);
buf.append(DataHelper.stripHTML(key)).append(" = ").append(DataHelper.stripHTML(val)).append("<br>\n");
}
buf.append("</code></i>\n");
buf.append("</code></td></tr>\n");
} else {
buf.append("<a href=\"netdb.jsp?r=").append(hash.substring(0, 6)).append("\" >Full entry</a>\n");
}
buf.append("<hr>\n");
buf.append("</td></tr>\n");
}
}

View File

@ -3,6 +3,7 @@ package net.i2p.router.tunnel.pool;
import java.util.HashSet;
import java.util.Set;
import net.i2p.crypto.SessionKeyManager;
import net.i2p.data.Certificate;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
@ -28,6 +29,8 @@ class TestJob extends JobImpl {
private TunnelInfo _outTunnel;
private TunnelInfo _replyTunnel;
private PooledTunnelCreatorConfig _otherTunnel;
/** save this so we can tell the SKM to kill it if the test fails */
private SessionTag _encryptTag;
/** base to randomize the test delay on */
private static final int TEST_DELAY = 30*1000;
@ -129,6 +132,7 @@ class TestJob extends JobImpl {
SessionKey encryptKey = getContext().keyGenerator().generateSessionKey();
SessionTag encryptTag = new SessionTag(true);
_encryptTag = encryptTag;
SessionKey sentKey = new SessionKey();
Set sentTags = null;
GarlicMessage msg = GarlicMessageBuilder.buildMessage(getContext(), payload, sentKey, sentTags,
@ -142,7 +146,14 @@ class TestJob extends JobImpl {
}
Set encryptTags = new HashSet(1);
encryptTags.add(encryptTag);
getContext().sessionKeyManager().tagsReceived(encryptKey, encryptTags);
// Register the single tag with the appropriate SKM
if (_cfg.isInbound() && !_pool.getSettings().isExploratory()) {
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_pool.getSettings().getDestination());
if (skm != null)
skm.tagsReceived(encryptKey, encryptTags);
} else {
getContext().sessionKeyManager().tagsReceived(encryptKey, encryptTags);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending garlic test of " + _outTunnel + " / " + _replyTunnel);
@ -307,8 +318,17 @@ class TestJob extends JobImpl {
public void runJob() {
if (_log.shouldLog(Log.WARN))
_log.warn("Timeout: found? " + _found, getAddedBy());
if (!_found)
if (!_found) {
// don't clog up the SKM with old one-tag tagsets
if (_cfg.isInbound() && !_pool.getSettings().isExploratory()) {
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_pool.getSettings().getDestination());
if (skm != null)
skm.consumeTag(_encryptTag);
} else {
getContext().sessionKeyManager().consumeTag(_encryptTag);
}
testFailed(getContext().clock().now() - _started);
}
}
@Override

View File

@ -309,7 +309,8 @@ public class TunnelPoolManager implements TunnelManagerFacade {
void buildComplete(PooledTunnelCreatorConfig cfg) {
//buildComplete();
if (cfg.getLength() > 1 &&
!_context.router().gracefulShutdownInProgress()) {
(!_context.router().gracefulShutdownInProgress()) &&
!Boolean.valueOf(_context.getProperty("router.disableTunnelTesting")).booleanValue()) {
TunnelPool pool = cfg.getTunnelPool();
if (pool == null) {
// never seen this before, do we really need to bother
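
The new guard makes tunnel testing switchable at runtime via a router property. A usage sketch (the property name is taken from the code above; setting it is only needed to disable testing, since an absent property parses as false):

    # in router.config
    router.disableTunnelTesting=true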