0.5 merging

This commit is contained in:
jrandom
2005-02-16 22:43:00 +00:00
committed by zzz
parent 7ef9ce8cc6
commit 6e8e77b9ec
55 changed files with 3695 additions and 6875 deletions

View File

@ -1,83 +0,0 @@
package net.i2p.router;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.I2NPMessage;
/**
* Wrap an I2NP message received from the network prior to handling and processing.
*
*/
/**
 * Wrap an I2NP message received from the network prior to handling and processing.
 * Records where the message came from and when it was accepted so the router
 * can measure how long inbound messages live before being discarded.
 */
public class InNetMessage {
    private RouterContext _context;
    private I2NPMessage _message;
    private RouterIdentity _fromRouter;
    private Hash _fromRouterHash;
    /** when this wrapper was created (router clock time), for the timeToDiscard stat */
    private long _created;

    /**
     * Create an empty wrapper and register it with the message state monitor.
     *
     * @param context scope providing the clock, stat manager, and state monitor
     */
    public InNetMessage(RouterContext context) {
        _context = context;
        setMessage(null);
        setFromRouter(null);
        setFromRouterHash(null);
        context.messageStateMonitor().inboundMessageAdded();
        _created = context.clock().now();
        _context.statManager().createRateStat("inNetMessage.timeToDiscard",
                                              "How long until we discard an inbound msg?",
                                              "InNetMessage", new long[] { 5*60*1000, 30*60*1000, 60*60*1000 });
    }

    /** Retrieve the message */
    public I2NPMessage getMessage() { return _message; }
    public void setMessage(I2NPMessage msg) { _message = msg; }

    /** Hash of the router identity from which this message was received, if available */
    public Hash getFromRouterHash() { return _fromRouterHash; }
    public void setFromRouterHash(Hash routerIdentHash) { _fromRouterHash = routerIdentHash; }

    /** Router identity from which this message was received, if available */
    public RouterIdentity getFromRouter() { return _fromRouter; }
    public void setFromRouter(RouterIdentity router) { _fromRouter = router; }

    /**
     * Call this after we're done dealing with this message (when we no
     * longer need its data).  Drops the payload reference and records how
     * long the message was held before being discarded.
     */
    public void processingComplete() {
        _message = null;
        _context.messageStateMonitor().inboundMessageRead();
        long timeToDiscard = _context.clock().now() - _created;
        _context.statManager().addRateData("inNetMessage.timeToDiscard",
                                           timeToDiscard, timeToDiscard);
    }

    public void finalize() {
        try {
            _context.messageStateMonitor().inboundMessageFinalized();
        } finally {
            // fix: always chain to Object.finalize() (finalizer chaining);
            // the original never called super.finalize()
            try { super.finalize(); } catch (Throwable t) { /* nothing more we can do */ }
        }
    }

    public String toString() {
        StringBuffer buf = new StringBuffer(512);
        buf.append("InNetMessage: from [").append(getFromRouter());
        buf.append("] aka [").append(getFromRouterHash());
        buf.append("] message: ").append(getMessage());
        return buf.toString();
    }
}

View File

@ -0,0 +1,195 @@
package net.i2p.router;
import java.util.Iterator;
import java.util.Properties;
import net.i2p.data.Hash;
/**
* Wrap up the settings for a pool of tunnels (duh)
*
*/
/**
 * Wrap up the settings for a pool of tunnels (duh)
 */
public class TunnelPoolSettings {
    private Hash _destination;
    private String _destinationNickname;
    private int _quantity;
    private int _backupQuantity;
    private int _rebuildPeriod;
    private int _duration;
    private int _length;
    private int _lengthVariance;
    private boolean _isInbound;
    private boolean _isExploratory;
    private boolean _allowZeroHop;
    /** options found under our prefix that we didn't recognize, kept for round-tripping */
    private Properties _unknownOptions;

    /** prefix used to override the router's defaults for clients */
    public static final String PREFIX_DEFAULT = "router.defaultPool.";
    /** prefix used to configure the inbound exploratory pool */
    public static final String PREFIX_INBOUND_EXPLORATORY = "router.inboundPool.";
    /** prefix used to configure the outbound exploratory pool */
    public static final String PREFIX_OUTBOUND_EXPLORATORY = "router.outboundPool.";

    public static final String PROP_NICKNAME = "nickname";
    public static final String PROP_QUANTITY = "quantity";
    public static final String PROP_BACKUP_QUANTITY = "backupQuantity";
    public static final String PROP_REBUILD_PERIOD = "rebuildPeriod";
    public static final String PROP_DURATION = "duration";
    public static final String PROP_LENGTH = "length";
    public static final String PROP_LENGTH_VARIANCE = "lengthVariance";
    public static final String PROP_ALLOW_ZERO_HOP = "allowZeroHop";

    public static final int DEFAULT_QUANTITY = 2;
    public static final int DEFAULT_BACKUP_QUANTITY = 0;
    public static final int DEFAULT_REBUILD_PERIOD = 60*1000;
    public static final int DEFAULT_DURATION = 10*60*1000;
    public static final int DEFAULT_LENGTH = 2;
    public static final int DEFAULT_LENGTH_VARIANCE = -1;
    public static final boolean DEFAULT_ALLOW_ZERO_HOP = true;

    /** Build a settings object populated entirely with the defaults. */
    public TunnelPoolSettings() {
        _quantity = DEFAULT_QUANTITY;
        _backupQuantity = DEFAULT_BACKUP_QUANTITY;
        _rebuildPeriod = DEFAULT_REBUILD_PERIOD;
        _duration = DEFAULT_DURATION;
        _length = DEFAULT_LENGTH;
        _lengthVariance = DEFAULT_LENGTH_VARIANCE;
        _allowZeroHop = DEFAULT_ALLOW_ZERO_HOP;
        _isInbound = false;
        _isExploratory = false;
        _destination = null;
        _destinationNickname = null;
        _unknownOptions = new Properties();
    }

    /** how many tunnels should be available at all times */
    public int getQuantity() { return _quantity; }
    public void setQuantity(int quantity) { _quantity = quantity; }

    /** how many backup tunnels should be kept waiting in the wings */
    public int getBackupQuantity() { return _backupQuantity; }
    public void setBackupQuantity(int quantity) { _backupQuantity = quantity; }

    /** how long before tunnel expiration should new tunnels be built (ms) */
    public int getRebuildPeriod() { return _rebuildPeriod; }
    public void setRebuildPeriod(int periodMs) { _rebuildPeriod = periodMs; }

    /** how many remote hops should be in the tunnel */
    public int getLength() { return _length; }
    public void setLength(int length) { _length = length; }

    /** if there are no tunnels to build with, will this pool allow 0 hop tunnels? */
    public boolean getAllowZeroHop() { return _allowZeroHop; }
    public void setAllowZeroHop(boolean ok) { _allowZeroHop = ok; }

    /**
     * how should the length be varied.  if negative, this randomly skews from
     * (length - variance) to (length + variance), or if positive, from length
     * to (length + variance), inclusive.
     */
    public int getLengthVariance() { return _lengthVariance; }
    public void setLengthVariance(int variance) { _lengthVariance = variance; }

    /** is this an inbound tunnel? */
    public boolean isInbound() { return _isInbound; }
    public void setIsInbound(boolean isInbound) { _isInbound = isInbound; }

    /** is this an exploratory tunnel (or a client tunnel) */
    public boolean isExploratory() { return _isExploratory; }
    public void setIsExploratory(boolean isExploratory) { _isExploratory = isExploratory; }

    /** how long (ms) a tunnel built with these settings should last */
    public int getDuration() { return _duration; }
    public void setDuration(int ms) { _duration = ms; }

    /** what destination is this a tunnel for (or null if none) */
    public Hash getDestination() { return _destination; }
    public void setDestination(Hash dest) { _destination = dest; }

    /** what user supplied name was given to the client connected (can be null) */
    public String getDestinationNickname() { return _destinationNickname; }
    public void setDestinationNickname(String name) { _destinationNickname = name; }

    /** options under our prefix that readFromProperties did not recognize (prefix stripped) */
    public Properties getUnknownOptions() { return _unknownOptions; }

    /**
     * Load the settings found under the given prefix, leaving the current
     * value (or the default) for anything missing or unparseable.
     * Unrecognized keys under the prefix are retained, minus the prefix,
     * in getUnknownOptions().
     *
     * @param prefix non-null key prefix (e.g. PREFIX_DEFAULT)
     * @param props source properties; silently ignored if null
     */
    public void readFromProperties(String prefix, Properties props) {
        if (props == null) return; // fix: match writeToProperties' null tolerance
        // NOTE(review): keySet() does not see a Properties object's defaults -
        // confirm callers never pass a Properties constructed with defaults
        for (Iterator iter = props.keySet().iterator(); iter.hasNext(); ) {
            String name = (String)iter.next();
            String value = props.getProperty(name);
            if (name.startsWith(prefix)) {
                if (name.equalsIgnoreCase(prefix + PROP_ALLOW_ZERO_HOP))
                    _allowZeroHop = getBoolean(value, DEFAULT_ALLOW_ZERO_HOP);
                else if (name.equalsIgnoreCase(prefix + PROP_BACKUP_QUANTITY))
                    _backupQuantity = getInt(value, DEFAULT_BACKUP_QUANTITY);
                else if (name.equalsIgnoreCase(prefix + PROP_DURATION))
                    _duration = getInt(value, DEFAULT_DURATION);
                else if (name.equalsIgnoreCase(prefix + PROP_LENGTH))
                    _length = getInt(value, DEFAULT_LENGTH);
                else if (name.equalsIgnoreCase(prefix + PROP_LENGTH_VARIANCE))
                    _lengthVariance = getInt(value, DEFAULT_LENGTH_VARIANCE);
                else if (name.equalsIgnoreCase(prefix + PROP_QUANTITY))
                    _quantity = getInt(value, DEFAULT_QUANTITY);
                else if (name.equalsIgnoreCase(prefix + PROP_REBUILD_PERIOD))
                    _rebuildPeriod = getInt(value, DEFAULT_REBUILD_PERIOD);
                else if (name.equalsIgnoreCase(prefix + PROP_NICKNAME))
                    _destinationNickname = value;
                else
                    // startsWith(prefix) above already guarantees prefix is non-null
                    _unknownOptions.setProperty(name.substring(prefix.length()), value);
            }
        }
    }

    /**
     * Serialize the current settings (and any unknown options) into props,
     * each key prepended with the given prefix.
     *
     * @param prefix key prefix to use (may be empty)
     * @param props target properties; silently ignored if null
     */
    public void writeToProperties(String prefix, Properties props) {
        if (props == null) return;
        props.setProperty(prefix + PROP_ALLOW_ZERO_HOP, ""+_allowZeroHop);
        props.setProperty(prefix + PROP_BACKUP_QUANTITY, ""+_backupQuantity);
        props.setProperty(prefix + PROP_DURATION, ""+_duration);
        props.setProperty(prefix + PROP_LENGTH, ""+_length);
        props.setProperty(prefix + PROP_LENGTH_VARIANCE, ""+_lengthVariance);
        if (_destinationNickname != null)
            props.setProperty(prefix + PROP_NICKNAME, ""+_destinationNickname);
        props.setProperty(prefix + PROP_QUANTITY, ""+_quantity);
        props.setProperty(prefix + PROP_REBUILD_PERIOD, ""+_rebuildPeriod);
        for (Iterator iter = _unknownOptions.keySet().iterator(); iter.hasNext(); ) {
            String name = (String)iter.next();
            String val = _unknownOptions.getProperty(name);
            props.setProperty(prefix + name, val);
        }
    }

    public String toString() {
        StringBuffer buf = new StringBuffer();
        Properties p = new Properties();
        writeToProperties("", p);
        buf.append("Tunnel pool settings:\n");
        buf.append("====================================\n");
        for (Iterator iter = p.keySet().iterator(); iter.hasNext(); ) {
            String name = (String)iter.next();
            String val = p.getProperty(name);
            buf.append(name).append(" = [").append(val).append("]\n");
        }
        buf.append("is inbound? ").append(_isInbound).append("\n");
        buf.append("is exploratory? ").append(_isExploratory).append("\n");
        buf.append("====================================\n");
        return buf.toString();
    }

    ////
    //// parsing helpers
    ////

    /** true iff str is "true" or "yes" (case-insensitive); defaultValue when str is null */
    private static final boolean getBoolean(String str, boolean defaultValue) {
        if (str == null) return defaultValue;
        boolean v = "TRUE".equalsIgnoreCase(str) || "YES".equalsIgnoreCase(str);
        return v;
    }

    private static final int getInt(String str, int defaultValue) { return (int)getLong(str, defaultValue); }

    /** parse str as a long, falling back to defaultValue on null or bad input */
    private static final long getLong(String str, long defaultValue) {
        if (str == null) return defaultValue;
        try {
            long val = Long.parseLong(str);
            return val;
        } catch (NumberFormatException nfe) {
            return defaultValue;
        }
    }
}

View File

@ -0,0 +1,115 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.PrivateKey;
import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.GarlicClove;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.LeaseSetKeys;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
/**
* Unencrypt a garlic message and pass off any valid cloves to the configured
* receiver to dispatch as they choose.
*
*/
/**
 * Unencrypt a garlic message and pass off any valid cloves to the configured
 * receiver to dispatch as they choose.
 */
public class GarlicMessageReceiver {
    private RouterContext _context;
    private Log _log;
    private CloveReceiver _receiver;
    /** client whose leaseSet keys decrypt the garlic, or null to use the router's own key */
    private Hash _clientDestination;
    private GarlicMessageParser _parser;

    private final static int FORWARD_PRIORITY = 50;

    /** Callback handed each clove that passes validation. */
    public interface CloveReceiver {
        public void handleClove(DeliveryInstructions instructions, I2NPMessage data);
    }

    /** Router-level receiver: decrypt with the router's private key. */
    public GarlicMessageReceiver(RouterContext context, CloveReceiver receiver) {
        this(context, receiver, null);
    }

    /**
     * @param clientDestination decrypt with this client's leaseSet keys,
     *                          or null for router-level decryption
     */
    public GarlicMessageReceiver(RouterContext context, CloveReceiver receiver, Hash clientDestination) {
        _context = context;
        _log = context.logManager().getLog(GarlicMessageReceiver.class);
        _context.statManager().createRateStat("crypto.garlic.decryptFail", "How often garlic messages are undecryptable", "Encryption", new long[] { 5*60*1000, 60*60*1000, 24*60*60*1000 });
        _clientDestination = clientDestination;
        _parser = new GarlicMessageParser(context);
        _receiver = receiver;
    }

    /**
     * Decrypt the garlic message with the appropriate private key and hand
     * each contained clove to handleClove().  Decrypt failures are logged,
     * counted in crypto.garlic.decryptFail, and noted in message history.
     */
    public void receive(GarlicMessage message) {
        PrivateKey decryptionKey = null;
        if (_clientDestination != null) {
            LeaseSetKeys keys = _context.keyManager().getKeys(_clientDestination);
            if (keys != null) {
                decryptionKey = keys.getDecryptionKey();
            } else {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Not trying to decrypt a garlic routed message to a disconnected client");
                return;
            }
        } else {
            decryptionKey = _context.keyManager().getPrivateKey();
        }

        CloveSet set = _parser.getGarlicCloves(message, decryptionKey);
        if (set != null) {
            for (int i = 0; i < set.getCloveCount(); i++) {
                GarlicClove clove = set.getClove(i);
                handleClove(clove);
            }
        } else {
            if (_log.shouldLog(Log.ERROR))
                _log.error("CloveMessageParser failed to decrypt the message [" + message.getUniqueId()
                           + "]", new Exception("Decrypt garlic failed"));
            _context.statManager().addRateData("crypto.garlic.decryptFail", 1, 0);
            _context.messageHistory().messageProcessingError(message.getUniqueId(),
                                                             message.getClass().getName(),
                                                             "Garlic could not be decrypted");
        }
    }

    /**
     * Validate and pass off any valid cloves to the receiver
     */
    private void handleClove(GarlicClove clove) {
        if (!isValid(clove)) {
            // fix: the guard checked shouldLog(Log.DEBUG) but then logged at
            // WARN - use a consistent level so the message isn't skipped
            if (_log.shouldLog(Log.WARN))
                _log.warn("Invalid clove " + clove);
            return;
        }
        if (_receiver != null)
            _receiver.handleClove(clove.getInstructions(), clove.getData());
    }

    /**
     * A clove is valid if its id/expiration pass the message validator
     * (i.e. it is neither a duplicate nor expired).
     */
    private boolean isValid(GarlicClove clove) {
        boolean valid = _context.messageValidator().validateMessage(clove.getCloveId(),
                                                                    clove.getExpiration().getTime());
        if (!valid) {
            String howLongAgo = DataHelper.formatDuration(_context.clock().now()-clove.getExpiration().getTime());
            if (_log.shouldLog(Log.ERROR))
                _log.error("Clove is NOT valid: id=" + clove.getCloveId()
                           + " expiration " + howLongAgo + " ago", new Exception("Invalid within..."));
            _context.messageHistory().messageProcessingError(clove.getCloveId(),
                                                             clove.getData().getClass().getName(),
                                                             "Clove is not valid (expiration " + howLongAgo + " ago)");
        }
        return valid;
    }
}

View File

@ -1,672 +0,0 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.List;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.Payload;
import net.i2p.data.RouterIdentity;
import net.i2p.data.SessionKey;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.I2NPMessageException;
import net.i2p.data.i2np.I2NPMessageHandler;
import net.i2p.data.i2np.TunnelMessage;
import net.i2p.data.i2np.TunnelVerificationStructure;
import net.i2p.router.ClientMessage;
import net.i2p.router.InNetMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageReceptionInfo;
import net.i2p.router.MessageSelector;
import net.i2p.router.ReplyJob;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.util.Log;
public class HandleTunnelMessageJob extends JobImpl {
private Log _log;
private TunnelMessage _message;
private RouterIdentity _from;
private Hash _fromHash;
private I2NPMessageHandler _handler;
private final static int FORWARD_TIMEOUT = 60*1000;
private final static int FORWARD_PRIORITY = 400;
/**
 * Prepare to process one inbound TunnelMessage.
 *
 * @param ctx router scope
 * @param msg the tunnel message received off the wire
 * @param from full identity of the sending router, if known (may be null)
 * @param fromHash hash of the sending router, if known (may be null)
 */
public HandleTunnelMessageJob(RouterContext ctx, TunnelMessage msg, RouterIdentity from, Hash fromHash) {
    super(ctx);
    _log = ctx.logManager().getLog(HandleTunnelMessageJob.class);
    _handler = new I2NPMessageHandler(ctx);
    _message = msg;
    _from = from;
    _fromHash = fromHash;
    // register the stats this job feeds while relaying / terminating tunnel messages
    ctx.statManager().createRateStat("tunnel.unknownTunnelTimeLeft", "How much time is left on tunnel messages we receive that are for unknown tunnels?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    ctx.statManager().createRateStat("tunnel.gatewayMessageSize", "How large are the messages we are forwarding on as an inbound gateway?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
    ctx.statManager().createRateStat("tunnel.relayMessageSize", "How large are the messages we are forwarding on as a participant in a tunnel?", "Tunnels", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
    ctx.statManager().createRateStat("tunnel.endpointMessageSize", "How large are the messages we are forwarding in as an outbound endpoint?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
    ctx.statManager().createRateStat("tunnel.expiredAfterAcceptTime", "How long after expiration do we finally start running an expired tunnel message?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
}
/**
 * Sanity check an inbound tunnel message before processing: drop messages
 * that expired on the job queue beyond the router's clock fudge factor,
 * messages for tunnels we know nothing about, and messages for tunnels
 * that don't actually list this router as a hop.
 *
 * @param id tunnel the message was received for
 * @return the TunnelInfo describing our hop in that tunnel, or null if the
 *         message must be dropped (all logging/stats are recorded here)
 */
private TunnelInfo validate(TunnelId id) {
// how far past its expiration the message already is (negative = still valid)
long excessLag = getContext().clock().now() - _message.getMessageExpiration().getTime();
if (excessLag > Router.CLOCK_FUDGE_FACTOR) {
// expired while on the queue
if (_log.shouldLog(Log.WARN))
_log.warn("Accepted message (" + _message.getUniqueId() + ") expired on the queue for tunnel "
+ id.getTunnelId() + " expiring "
+ excessLag
+ "ms ago");
getContext().statManager().addRateData("tunnel.expiredAfterAcceptTime", excessLag, excessLag);
getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
TunnelMessage.class.getName(),
"tunnel message expired on the queue");
return null;
} else if (excessLag > 0) {
// almost expired while on the queue - still within the fudge factor, so process it anyway
if (_log.shouldLog(Log.WARN))
_log.warn("Accepted message (" + _message.getUniqueId() + ") *almost* expired on the queue for tunnel "
+ id.getTunnelId() + " expiring "
+ excessLag
+ "ms ago");
} else {
// not expired
}
TunnelInfo info = getContext().tunnelManager().getTunnelInfo(id);
if (info == null) {
// completely unknown tunnel - record who sent it to us before dropping
Hash from = _fromHash;
if (_from != null)
from = _from.getHash();
getContext().messageHistory().droppedTunnelMessage(id, _message.getUniqueId(),
_message.getMessageExpiration(),
from);
if (_log.shouldLog(Log.ERROR))
_log.error("Received a message for an unknown tunnel [" + id.getTunnelId()
+ "], dropping it: " + _message, getAddedBy());
long timeRemaining = _message.getMessageExpiration().getTime() - getContext().clock().now();
getContext().statManager().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0);
// how late this job started relative to when it was scheduled
long lag = getTiming().getActualStart() - getTiming().getStartAfter();
if (_log.shouldLog(Log.ERROR))
_log.error("Lag processing a dropped tunnel message: " + lag);
return null;
}
// narrow the hop chain down to the entry describing this router
info = getUs(info);
if (info == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("We are not part of a known tunnel?? wtf! drop.", getAddedBy());
long timeRemaining = _message.getMessageExpiration().getTime() - getContext().clock().now();
getContext().statManager().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0);
return null;
}
return info;
}
/**
* The current router may be the gateway to the tunnel since there is no
* verification data, or it could be a b0rked message.
*
*/
/**
 * The current router may be the gateway to the tunnel since there is no
 * verification data, or it could be a b0rked message.  If we hold the
 * tunnel's signing key we act as the gateway (forwarding onward, or
 * handling locally for a zero-hop tunnel); otherwise the message is bogus.
 */
private void receiveUnverified(TunnelInfo info) {
    if (info.getSigningKey() == null) {
        // not the gateway, yet no verification structure - can't be legitimate
        if (_log.shouldLog(Log.ERROR))
            _log.error("Received a message that we are not the gateway for on tunnel "
                       + info.getTunnelId().getTunnelId()
                       + " without a verification structure: " + _message, getAddedBy());
        return;
    }
    if (info.getNextHop() != null) {
        // we're the gateway of a multi-hop tunnel - push the message along
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("We are the gateway to tunnel " + info.getTunnelId().getTunnelId());
        byte data[] = _message.getData();
        I2NPMessage msg = getBody(data);
        getContext().jobQueue().addJob(new HandleGatewayMessageJob(getContext(), msg, info, data.length));
        return;
    }
    // no next hop: we are both the gateway and the endpoint
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("We are the gateway and the endpoint for tunnel " + info.getTunnelId().getTunnelId());
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Process locally");
    if (info.getDestination() != null) {
        if (!getContext().clientManager().isLocal(info.getDestination())) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Received a message on a tunnel allocated to a client that has disconnected - dropping it!");
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Dropping message for disconnected client: " + _message);
            getContext().messageHistory().droppedOtherMessage(_message);
            getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
                                                                 _message.getClass().getName(),
                                                                 "Disconnected client");
            return;
        }
    }
    I2NPMessage body = getBody(_message.getData());
    if (body == null) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("Body is null! content of message.getData() = [" +
                       DataHelper.toString(_message.getData()) + "]", getAddedBy());
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Message that failed: " + _message, getAddedBy());
        return;
    }
    getContext().jobQueue().addJob(new HandleLocallyJob(getContext(), body, info));
}
/**
* We may be a participant in the tunnel, as there is a verification structure.
*
*/
/**
 * We may be a participant in the tunnel, as there is a verification structure.
 * Verify its signature against the tunnel's verification key; on success,
 * either repackage and relay the message to the next hop, or (if we are the
 * last hop) unwrap it as the endpoint.
 */
private void receiveParticipant(TunnelInfo info) {
// participant
TunnelVerificationStructure struct = _message.getVerificationStructure();
boolean ok = struct.verifySignature(getContext(), info.getVerificationKey().getKey());
if (!ok) {
if (_log.shouldLog(Log.WARN))
_log.warn("Failed tunnel verification! Spoofing / tagging attack? " + _message, getAddedBy());
return;
} else {
if (info.getNextHop() != null) {
if (_log.shouldLog(Log.INFO))
_log.info("Message for tunnel " + info.getTunnelId().getTunnelId()
+ " received where we're not the gateway and there are remaining hops, so forward it on to "
+ info.getNextHop().toBase64() + " via SendTunnelMessageJob");
getContext().statManager().addRateData("tunnel.relayMessageSize",
_message.getData().length, 0);
// re-wrap the payload for the next hop, preserving instructions,
// verification structure, and the original expiration
TunnelMessage msg = new TunnelMessage(getContext());
msg.setData(_message.getData());
msg.setEncryptedDeliveryInstructions(_message.getEncryptedDeliveryInstructions());
msg.setTunnelId(info.getNextHopId());
msg.setVerificationStructure(_message.getVerificationStructure());
msg.setMessageExpiration(_message.getMessageExpiration());
// allow the fudge factor beyond the nominal expiration before giving up
int timeoutMs = (int)(_message.getMessageExpiration().getTime() - getContext().clock().now());
timeoutMs += Router.CLOCK_FUDGE_FACTOR;
if (timeoutMs < 1000) {
if (_log.shouldLog(Log.ERROR))
_log.error("Message " + _message.getUniqueId() + " is valid and we would pass it on through tunnel "
+ info.getTunnelId().getTunnelId() + ", but its too late (expired " + timeoutMs + "ms ago)");
return;
}
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), msg,
info.getNextHop(),
timeoutMs,
FORWARD_PRIORITY);
getContext().jobQueue().addJob(j);
return;
} else {
// we are the endpoint - decrypt and honor the delivery instructions
if (_log.shouldLog(Log.DEBUG))
_log.debug("No more hops, unwrap and follow the instructions");
getContext().jobQueue().addJob(new HandleEndpointJob(getContext(), info));
return;
}
}
}
/** @return descriptive name of this job for the job queue display */
public String getName() {
    return "Handle Inbound Tunnel Message";
}
/**
 * Entry point: validate the message's tunnel, credit the tunnel's usage
 * stats, then dispatch based on whether a verification structure is present
 * (participant relay) or absent (we may be the gateway).
 */
public void runJob() {
    TunnelInfo info = validate(_message.getTunnelId());
    if (info == null)
        return; // already logged/stat'ed and dropped by validate()
    info.messageProcessed(_message.getMessageSize());
    if (_message.getVerificationStructure() != null)
        receiveParticipant(info);
    else
        receiveUnverified(info);
}
/**
 * We are the endpoint: decrypt the delivery instructions with our tunnel
 * encryption key, recover the (possibly encrypted) payload, and queue it
 * for local processing.
 */
private void processLocally(TunnelInfo ourPlace) {
    if (ourPlace.getEncryptionKey() == null) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("Argh, somehow we don't have the decryption key and we have no more steps", getAddedBy());
        return;
    }
    DeliveryInstructions instructions = getInstructions(_message.getEncryptedDeliveryInstructions(),
                                                        ourPlace.getEncryptionKey().getKey());
    if (instructions == null) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("We are the endpoint of a non-zero length tunnel and we don't have instructions. DROP.", getAddedBy());
        return;
    }
    I2NPMessage body;
    if (instructions.getEncrypted()) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Body in the tunnel IS encrypted");
        body = decryptBody(_message.getData(), instructions.getEncryptionKey());
    } else {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Body in the tunnel is NOT encrypted: " + instructions
                       + "\n" + _message, new Exception("Hmmm..."));
        body = getBody(_message.getData());
    }
    if (body == null) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("Unable to recover the body from the tunnel", getAddedBy());
        return;
    }
    getContext().jobQueue().addJob(new ProcessBodyLocallyJob(getContext(), body, instructions, ourPlace));
}
/**
 * Dispatch the unwrapped payload according to its delivery instructions:
 * to this router, a specific router, another tunnel, or a destination.
 */
private void honorInstructions(DeliveryInstructions instructions, I2NPMessage body) {
getContext().statManager().addRateData("tunnel.endpointMessageSize", _message.getData().length, 0);
switch (instructions.getDeliveryMode()) {
case DeliveryInstructions.DELIVERY_MODE_LOCAL:
sendToLocal(body);
break;
case DeliveryInstructions.DELIVERY_MODE_ROUTER:
// ROUTER mode may still mean us - short-circuit to local in that case
if (getContext().routerHash().equals(instructions.getRouter())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Delivery instructions point at a router, but we're that router, so send to local");
sendToLocal(body);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Delivery instructions point at a router, and we're not that router, so forward it off");
sendToRouter(instructions.getRouter(), body);
}
break;
case DeliveryInstructions.DELIVERY_MODE_TUNNEL:
sendToTunnel(instructions.getRouter(), instructions.getTunnelId(), body);
break;
case DeliveryInstructions.DELIVERY_MODE_DESTINATION:
sendToDest(instructions.getDestination(), body);
break;
}
}
/**
 * Deliver the payload to the given destination.  Only DataMessages bound
 * for a locally connected client are currently supported; everything else
 * is logged and dropped.
 */
private void sendToDest(Hash dest, I2NPMessage body) {
    if (!(body instanceof DataMessage)) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("Deliver something other than a DataMessage to a Destination? I don't think so.");
        return;
    }
    if (!getContext().clientManager().isLocal(dest)) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("Delivery to remote destinations is not yet supported", getAddedBy());
        return;
    }
    deliverMessage(null, dest, (DataMessage)body);
}
/**
 * Forward the payload into another tunnel (gateway given by router/id).
 * If the message did NOT arrive at the end of one of our inbound tunnels,
 * we forward directly; otherwise we relay through one of our own outbound
 * tunnels so our location stays hidden.
 */
private void sendToTunnel(Hash router, TunnelId id, I2NPMessage body) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending on to requested tunnel " + id.getTunnelId() + " on router "
+ router.toBase64());
int timeoutMs = (int)(body.getMessageExpiration().getTime() - getContext().clock().now());
if (timeoutMs < 5000)
timeoutMs = FORWARD_TIMEOUT;
TunnelInfo curInfo = getContext().tunnelManager().getTunnelInfo(_message.getTunnelId());
if (curInfo == null) {
// the tunnel we received on expired while we were processing
if (_log.shouldLog(Log.WARN))
_log.warn("Tunnel went away (" + _message.getTunnelId() + ")");
return;
}
if (curInfo.getTunnelId().getType() != TunnelId.TYPE_INBOUND) {
// we are not processing a request at the end of an inbound tunnel, so
// there's no reason to hide our location. honor the request directly
TunnelMessage msg = new TunnelMessage(getContext());
msg.setTunnelId(id);
msg.setData(body.toByteArray());
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, router, timeoutMs, FORWARD_PRIORITY));
String bodyType = body.getClass().getName();
getContext().messageHistory().wrap(bodyType, body.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
} else {
// the instructions request that we forward a message remotely from
// the hidden location. honor it by sending it out a tunnel
TunnelId outTunnelId = selectOutboundTunnelId();
if (outTunnelId == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No outbound tunnels available to forward the message, dropping it");
return;
}
getContext().jobQueue().addJob(new SendTunnelMessageJob(getContext(), body, outTunnelId, router, id,
null, null, null, null, timeoutMs, FORWARD_PRIORITY));
}
}
/**
 * Pick a single outbound tunnel for forwarding.
 *
 * @return an outbound TunnelId, or null if none are available
 */
private TunnelId selectOutboundTunnelId() {
    TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
    criteria.setMinimumTunnelsRequired(1);
    criteria.setMaximumTunnelsRequired(1);
    List ids = getContext().tunnelManager().selectOutboundTunnelIds(criteria);
    if ( (ids != null) && (ids.size() > 0) )
        return (TunnelId)ids.get(0);
    return null;
}
/**
 * Forward the payload directly to the specified router.
 * TODO: we may want to send it via a tunnel later on, but for now, direct will do.
 */
private void sendToRouter(Hash router, I2NPMessage body) {
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Sending on to requested router " + router.toBase64());
    long expiration = body.getMessageExpiration().getTime();
    int timeoutMs = (int)(expiration - getContext().clock().now());
    if (timeoutMs < 5000)
        timeoutMs = FORWARD_TIMEOUT; // too close to expiry - give it the standard window
    getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), body, router, timeoutMs, FORWARD_PRIORITY));
}
/** Inject the unwrapped payload back into our own inbound message pool. */
private void sendToLocal(I2NPMessage body) {
    InNetMessage wrapped = new InNetMessage(getContext());
    wrapped.setMessage(body);
    wrapped.setFromRouterHash(_fromHash);
    wrapped.setFromRouter(_from);
    getContext().inNetMessagePool().add(wrapped);
}
/**
 * Hand a received DataMessage payload to the local client subsystem via
 * the client message pool, after de-duplicating through the validator.
 *
 * @param dest full destination if known (may be null)
 * @param destHash hash of the destination to deliver to
 * @param msg the payload-bearing message
 */
private void deliverMessage(Destination dest, Hash destHash, DataMessage msg) {
// reject duplicate / expired payloads
boolean valid = getContext().messageValidator().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime());
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate data message received [" + msg.getUniqueId()
+ " expiring on " + msg.getMessageExpiration() + "]");
getContext().messageHistory().droppedOtherMessage(msg);
getContext().messageHistory().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(),
"Duplicate payload");
return;
}
// wrap the still-encrypted payload plus reception metadata for the client layer
ClientMessage cmsg = new ClientMessage();
Payload payload = new Payload();
payload.setEncryptedData(msg.getData());
MessageReceptionInfo info = new MessageReceptionInfo();
info.setFromPeer(_fromHash);
info.setFromTunnel(_message.getTunnelId());
cmsg.setDestination(dest);
cmsg.setDestinationHash(destHash);
cmsg.setPayload(payload);
cmsg.setReceptionInfo(info);
getContext().messageHistory().receivePayloadMessage(msg.getUniqueId());
// if the destination isn't local, the ClientMessagePool forwards it off as an OutboundClientMessageJob
getContext().clientMessagePool().add(cmsg);
}
/**
 * Parse an I2NP message out of raw payload bytes.
 *
 * @return the parsed message, or null if parsing failed (already logged)
 */
private I2NPMessage getBody(byte body[]) {
    try {
        return _handler.readMessage(body);
    } catch (I2NPMessageException ime) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("Error parsing the message body", ime);
        return null;
    } catch (IOException ioe) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("Error parsing the message body", ioe);
        return null;
    }
}
/**
 * Decrypt the tunnel payload with the given session key and parse the
 * plaintext as an I2NP message.  The 16-byte IV is taken from the leading
 * bytes of the hash of the key itself.
 *
 * @return the decrypted, parsed message, or null on any failure (logged)
 */
private I2NPMessage decryptBody(byte encryptedMessage[], SessionKey key) {
byte iv[] = new byte[16];
Hash h = getContext().sha().calculateHash(key.getData());
System.arraycopy(h.getData(), 0, iv, 0, iv.length);
byte decrypted[] = getContext().aes().safeDecrypt(encryptedMessage, key, iv);
if (decrypted == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error decrypting the message", getAddedBy());
return null;
}
if (decrypted.length <= 0) {
if (_log.shouldLog(Log.ERROR))
_log.error("Received an empty decrypted message? encrypted length: " + encryptedMessage.length, getAddedBy());
return null;
}
return getBody(decrypted);
}
/**
 * Decrypt and deserialize the delivery instructions.  Uses the same IV
 * derivation as decryptBody: the first 16 bytes of the hash of the key.
 *
 * @return the parsed instructions, or null on any failure (logged)
 */
private DeliveryInstructions getInstructions(byte encryptedInstructions[], SessionKey key) {
try {
byte iv[] = new byte[16];
Hash h = getContext().sha().calculateHash(key.getData());
System.arraycopy(h.getData(), 0, iv, 0, iv.length);
byte decrypted[] = getContext().aes().safeDecrypt(encryptedInstructions, key, iv);
if (decrypted == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error decrypting the instructions", getAddedBy());
return null;
}
DeliveryInstructions instructions = new DeliveryInstructions();
instructions.readBytes(new ByteArrayInputStream(decrypted));
return instructions;
} catch (DataFormatException dfe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error parsing the decrypted instructions", dfe);
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error reading the decrypted instructions", ioe);
}
return null;
}
/**
 * Walk the hop chain and return the TunnelInfo whose hop is this router,
 * or null if we do not participate in the given tunnel.
 */
private TunnelInfo getUs(TunnelInfo info) {
    Hash ourHash = getContext().routerHash();
    for (TunnelInfo cur = info; cur != null; cur = cur.getNextHopInfo()) {
        if (ourHash.equals(cur.getThisHop()))
            return cur;
    }
    return null;
}
/**
 * Verify the tunnel message: the verification structure must be present,
 * its signature must check out against the tunnel's verification key, and
 * the signed hash must match the hash of the actual message data.
 *
 * NOTE(review): the msg parameter is never referenced - the body validates
 * the job's _message field instead.  Presumably callers pass _message here;
 * confirm against the call site before relying on the parameter.
 *
 * @return true if the message is authentic and untampered, false otherwise
 */
private boolean validateMessage(TunnelMessage msg, TunnelInfo info) {
    TunnelVerificationStructure vstruct = _message.getVerificationStructure();
    if (vstruct == null) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Verification structure missing. invalid");
        return false;
    }
    // can't verify anything without the tunnel's verification key
    if ( (info.getVerificationKey() == null) || (info.getVerificationKey().getKey() == null) ) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("wtf, no verification key for the tunnel? " + info, getAddedBy());
        return false;
    }
    if (!vstruct.verifySignature(getContext(), info.getVerificationKey().getKey())) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("Received a tunnel message with an invalid signature!");
        // shitlist the sender?
        return false;
    }
    // now validate the message
    Hash msgHash = getContext().sha().calculateHash(_message.getData());
    if (msgHash.equals(vstruct.getMessageHash())) {
        // hash matches. good.
        return true;
    } else {
        if (_log.shouldLog(Log.ERROR))
            _log.error("validateMessage: Signed hash does not match real hash. Data has been tampered with!");
        // shitlist the sender!
        return false;
    }
}
/** Called when this job is discarded without running; record the drop in the message history. */
public void dropped() {
    I2NPMessage msg = _message;
    getContext().messageHistory().messageProcessingError(msg.getUniqueId(),
                                                         msg.getClass().getName(),
                                                         "Dropped due to overload");
}
////
// series of subjobs for breaking this task into smaller components
////
/** we're the gateway, lets deal */
private class HandleGatewayMessageJob extends JobImpl {
    private I2NPMessage _body;   // parsed message body (null when parsing failed)
    private int _length;         // size used for the tunnel.gatewayMessageSize stat
    private TunnelInfo _info;    // the tunnel we are the gateway for
    public HandleGatewayMessageJob(RouterContext enclosingContext, I2NPMessage body, TunnelInfo tunnel, int length) {
        super(enclosingContext);
        _body = body;
        _length = length;
        _info = tunnel;
    }
    /**
     * Forward the parsed body on to the tunnel's next hop via a
     * SendTunnelMessageJob, with a timeout derived from the body's own
     * expiration.  If the body could not be parsed, just log and drop.
     */
    public void runJob() {
        RouterContext ctx = HandleTunnelMessageJob.this.getContext();
        if (_body != null) {
            long expiration = _body.getMessageExpiration().getTime();
            long timeout = expiration - ctx.clock().now();
            ctx.statManager().addRateData("tunnel.gatewayMessageSize", _length, 0);
            if (_log.shouldLog(Log.INFO))
                _log.info("Message for tunnel " + _info.getTunnelId()
                + " received at the gateway (us), and since its > 0 length, forward the "
                + _body.getClass().getName() + " message on to "
                + _info.getNextHop().toBase64() + " via SendTunnelMessageJob expiring in "
                + timeout + "ms");
            // plain point-to-point forward: no reply selector or callbacks needed
            MessageSelector selector = null;
            Job onFailure = null;
            Job onSuccess = null;
            ReplyJob onReply = null;
            Hash targetRouter = null;
            TunnelId targetTunnelId = null;
            SendTunnelMessageJob j = new SendTunnelMessageJob(ctx, _body, _info.getNextHopId(), targetRouter, targetTunnelId, onSuccess, onReply, onFailure, selector, timeout, FORWARD_PRIORITY);
            ctx.jobQueue().addJob(j);
        } else {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Body of the message for the tunnel could not be parsed");
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Message that failed: " + _message);
        }
    }
    public String getName() { return "Handle Tunnel Message (gateway)"; }
}
/** zero hop tunnel */
private class HandleLocallyJob extends JobImpl {
    private I2NPMessage _body;   // parsed message body
    private TunnelInfo _info;    // the 0-hop tunnel the message arrived on
    public HandleLocallyJob(RouterContext enclosingContext, I2NPMessage body, TunnelInfo tunnel) {
        super(enclosingContext);
        _body = body;
        _info = tunnel;
    }
    /**
     * Deliver a DataMessage straight to the local client destination;
     * everything else goes onto the inbound network pool for local
     * router processing.
     */
    public void runJob() {
        if (_body instanceof DataMessage) {
            // we know where to send it and its something a client can handle, so lets send 'er to the client
            // (bugfix: this guard previously checked Log.WARN around a debug() call)
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Deliver the message to a local client, as its a payload message and we know the destination");
            if (_log.shouldLog(Log.INFO))
                _log.info("Message for tunnel " + _info.getTunnelId() + " received at the gateway (us), but its a 0 length tunnel and the message is a DataMessage, so send it to "
                          + _info.getDestination().calculateHash().toBase64());
            deliverMessage(_info.getDestination(), null, (DataMessage)_body);
        } else {
            if (_log.shouldLog(Log.INFO))
                _log.info("Message for tunnel " + _info.getTunnelId() +
                          " received at the gateway (us), but its a 0 length tunnel though it is a "
                          + _body.getClass().getName() + ", so process it locally");
            // hand the message to the router's inbound pool, tagged with the sender info
            InNetMessage msg = new InNetMessage(HandleLocallyJob.this.getContext());
            msg.setFromRouter(_from);
            msg.setFromRouterHash(_fromHash);
            msg.setMessage(_body);
            HandleLocallyJob.this.getContext().inNetMessagePool().add(msg);
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Message added to Inbound network pool for local processing: " + _message);
        }
    }
    public String getName() { return "Handle Tunnel Message (0 hop)"; }
}
/** we're the endpoint of the inbound tunnel */
private class HandleEndpointJob extends JobImpl {
    private TunnelInfo _info;
    public HandleEndpointJob(RouterContext enclosingContext, TunnelInfo info) {
        super(enclosingContext);
        _info = info;
    }
    public String getName() { return "Handle Tunnel Message (inbound endpoint)"; }
    // simply defer to the outer job's local processing with our tunnel info
    public void runJob() { processLocally(_info); }
}
/** endpoint of outbound 1+ hop tunnel with instructions */
private class ProcessBodyLocallyJob extends JobImpl {
    private I2NPMessage _body;
    private TunnelInfo _ourPlace;
    private DeliveryInstructions _instructions;
    public ProcessBodyLocallyJob(RouterContext enclosingContext, I2NPMessage body, DeliveryInstructions instructions, TunnelInfo ourPlace) {
        super(enclosingContext);
        _body = body;
        _instructions = instructions;
        _ourPlace = ourPlace;
    }
    public String getName() { return "Handle Tunnel Message (outbound endpoint)"; }
    /**
     * If the tunnel has a local destination and the body is a plain
     * DataMessage, hand it straight to the client; otherwise honor the
     * delivery instructions that accompanied the message.
     */
    public void runJob() {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Body read: " + _body);
        boolean clientPayload = (_ourPlace.getDestination() != null) && (_body instanceof DataMessage);
        if (clientPayload) {
            // we know where to send it and its something a client can handle, so lets send 'er to the client
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Deliver the message to a local client, as its a payload message and we know the destination");
            if (_log.shouldLog(Log.INFO))
                _log.info("Message for tunnel " + _ourPlace.getTunnelId().getTunnelId()
                          + " received where we're the endpoint containing a DataMessage message, so deliver it to "
                          + _ourPlace.getDestination().calculateHash().toBase64());
            deliverMessage(_ourPlace.getDestination(), null, (DataMessage)_body);
        } else {
            // Honor the delivery instructions
            if (_log.shouldLog(Log.INFO))
                _log.info("Message for tunnel " + _ourPlace.getTunnelId().getTunnelId()
                          + " received where we're the endpoint containing a "
                          + _body.getClass().getName() + " message, so honor the delivery instructions: "
                          + _instructions.toString());
            honorInstructions(_instructions, _body);
        }
    }
}
}

View File

@ -1,201 +0,0 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.io.ByteArrayOutputStream;
import java.util.List;
import net.i2p.data.Hash;
import net.i2p.data.Payload;
import net.i2p.data.RouterIdentity;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelMessage;
import net.i2p.router.ClientMessage;
import net.i2p.router.InNetMessage;
import net.i2p.router.MessageReceptionInfo;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.util.Log;
/**
 * Implement the inbound message processing logic to forward based on delivery instructions and
 * send acks.
 *
 */
class MessageHandler {
    private Log _log;
    private RouterContext _context;

    public MessageHandler(RouterContext ctx) {
        _context = ctx;
        _log = _context.logManager().getLog(MessageHandler.class);
    }

    /**
     * Dispatch an inbound message according to its delivery instructions:
     * locally (router or client destination), directly to a remote router,
     * or into a tunnel.
     *
     * @param instructions where/how the message should be delivered
     * @param message the message to deliver
     * @param replyId reply token (unused in this handler)
     * @param from full identity of the sender, if known
     * @param fromHash hash of the sender's identity, if known
     * @param expiration when the delivery attempt should give up
     * @param priority priority for any forwarding jobs queued
     * @param sendDirect if true, tunnel-bound messages are sent directly to the
     *        target router rather than through one of our outbound tunnels
     */
    public void handleMessage(DeliveryInstructions instructions, I2NPMessage message,
                              long replyId, RouterIdentity from, Hash fromHash,
                              long expiration, int priority, boolean sendDirect) {
        switch (instructions.getDeliveryMode()) {
            case DeliveryInstructions.DELIVERY_MODE_LOCAL:
                // consistency fix: guard the debug call like every other branch
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Instructions for LOCAL DELIVERY");
                if (message.getType() == DataMessage.MESSAGE_TYPE) {
                    handleLocalDestination(instructions, message, fromHash);
                } else {
                    handleLocalRouter(message, from, fromHash);
                }
                break;
            case DeliveryInstructions.DELIVERY_MODE_ROUTER:
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Instructions for ROUTER DELIVERY to "
                               + instructions.getRouter().toBase64());
                if (_context.routerHash().equals(instructions.getRouter())) {
                    handleLocalRouter(message, from, fromHash);
                } else {
                    handleRemoteRouter(message, instructions, expiration, priority);
                }
                break;
            case DeliveryInstructions.DELIVERY_MODE_DESTINATION:
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Instructions for DESTINATION DELIVERY to "
                               + instructions.getDestination().toBase64());
                if (_context.clientManager().isLocal(instructions.getDestination())) {
                    handleLocalDestination(instructions, message, fromHash);
                } else {
                    _log.error("Instructions requests forwarding on to a non-local destination. Not yet supported");
                }
                break;
            case DeliveryInstructions.DELIVERY_MODE_TUNNEL:
                if (_log.shouldLog(Log.DEBUG))
                    // fix: missing space between "to" and the tunnel id
                    _log.debug("Instructions for TUNNEL DELIVERY to "
                               + instructions.getTunnelId().getTunnelId() + " on "
                               + instructions.getRouter().toBase64());
                handleTunnel(instructions, expiration, message, priority, sendDirect);
                break;
            default:
                _log.error("Message has instructions that are not yet implemented: mode = " + instructions.getDeliveryMode());
        }
    }

    /** Toss the message onto the inbound network pool for local router handling. */
    private void handleLocalRouter(I2NPMessage message, RouterIdentity from, Hash fromHash) {
        // consistency fix: guard the info call like the rest of the class
        if (_log.shouldLog(Log.INFO))
            _log.info("Handle " + message.getClass().getName() + " to a local router - toss it on the inbound network pool");
        InNetMessage msg = new InNetMessage(_context);
        msg.setFromRouter(from);
        msg.setFromRouterHash(fromHash);
        msg.setMessage(message);
        _context.inNetMessagePool().add(msg);
    }

    /**
     * Validate (duplicate / expiration check) then forward the message directly
     * to the specified remote router via a SendMessageDirectJob.
     */
    private void handleRemoteRouter(I2NPMessage message, DeliveryInstructions instructions,
                                    long expiration, int priority) {
        boolean valid = _context.messageValidator().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime());
        if (!valid) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Duplicate / expired message received to remote router [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]");
            _context.messageHistory().droppedOtherMessage(message);
            _context.messageHistory().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired to remote router");
            return;
        }
        if (_log.shouldLog(Log.INFO))
            _log.info("Handle " + message.getClass().getName() + " to a remote router "
                      + instructions.getRouter().toBase64() + " - fire a SendMessageDirectJob");
        int timeoutMs = (int)(expiration-_context.clock().now());
        SendMessageDirectJob j = new SendMessageDirectJob(_context, message, instructions.getRouter(), timeoutMs, priority);
        _context.jobQueue().addJob(j);
    }

    /**
     * Forward the message into the specified tunnel, either by wrapping it in a
     * TunnelMessage sent directly to the target router (direct == true) or by
     * relaying it through one of our own outbound tunnels so we don't expose
     * our position in the delivery chain.
     */
    private void handleTunnel(DeliveryInstructions instructions, long expiration, I2NPMessage message, int priority, boolean direct) {
        Hash to = instructions.getRouter();
        long timeoutMs = expiration - _context.clock().now();
        TunnelId tunnelId = instructions.getTunnelId();
        if (!_context.routerHash().equals(to)) {
            // don't validate locally targetted tunnel messages, since then we'd have to tweak
            // around message validation thats already in place for SendMessageDirectJob
            boolean valid = _context.messageValidator().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime());
            if (!valid) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Duplicate / expired tunnel message received [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]");
                _context.messageHistory().droppedOtherMessage(message);
                _context.messageHistory().messageProcessingError(message.getUniqueId(),
                                                                 message.getClass().getName(),
                                                                 "Duplicate/expired");
                return;
            }
        }
        if (direct) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Handle " + message.getClass().getName() + " to send to remote tunnel "
                          + tunnelId.getTunnelId() + " on router " + to.toBase64());
            TunnelMessage msg = new TunnelMessage(_context);
            msg.setData(message.toByteArray());
            msg.setTunnelId(tunnelId);
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Placing message of type " + message.getClass().getName()
                           + " into the new tunnel message bound for " + tunnelId.getTunnelId()
                           + " on " + to.toBase64());
            _context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, to, (int)timeoutMs, priority));
            String bodyType = message.getClass().getName();
            _context.messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
        } else {
            // we received a message with instructions to send it somewhere, but we shouldn't
            // expose where we are in the process of honoring it. so, send it out a tunnel
            TunnelId outTunnelId = selectOutboundTunnelId();
            if (outTunnelId == null) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("No outbound tunnels available to forward the message, dropping it");
                return;
            }
            _context.jobQueue().addJob(new SendTunnelMessageJob(_context, message, outTunnelId, to, tunnelId,
                                                                null, null, null, null, timeoutMs, priority));
        }
    }

    /** Pick one of our outbound tunnels, or null if none are available. */
    private TunnelId selectOutboundTunnelId() {
        TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
        criteria.setMinimumTunnelsRequired(1);
        criteria.setMaximumTunnelsRequired(1);
        List ids = _context.tunnelManager().selectOutboundTunnelIds(criteria);
        if ( (ids == null) || (ids.size() <= 0) )
            return null;
        else
            return (TunnelId)ids.get(0);
    }

    /**
     * Validate (duplicate / expiration check) and deliver the DataMessage
     * payload to the local client destination via the client message pool.
     */
    private void handleLocalDestination(DeliveryInstructions instructions, I2NPMessage message, Hash fromHash) {
        boolean valid = _context.messageValidator().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime());
        if (!valid) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Duplicate / expired client message received [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]");
            _context.messageHistory().droppedOtherMessage(message);
            _context.messageHistory().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired client message");
            return;
        }
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Handle " + message.getClass().getName()
                       + " to a local destination - build a ClientMessage and pool it");
        ClientMessage msg = new ClientMessage();
        msg.setDestinationHash(instructions.getDestination());
        Payload payload = new Payload();
        payload.setEncryptedData(((DataMessage)message).getData());
        msg.setPayload(payload);
        MessageReceptionInfo info = new MessageReceptionInfo();
        info.setFromPeer(fromHash);
        msg.setReceptionInfo(info);
        _context.messageHistory().receivePayloadMessage(message.getUniqueId());
        _context.clientMessagePool().add(msg);
    }
}

View File

@ -1,635 +0,0 @@
package net.i2p.router.message;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.TreeMap;
import net.i2p.data.Certificate;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.Lease;
import net.i2p.data.LeaseSet;
import net.i2p.data.Payload;
import net.i2p.data.PublicKey;
import net.i2p.data.SessionKey;
import net.i2p.data.TunnelId;
import net.i2p.data.i2cp.MessageId;
import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.ClientMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageSelector;
import net.i2p.router.ReplyJob;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.util.Log;
/**
* Send a client message, taking into consideration the fact that there may be
* multiple inbound tunnels that the target provides. This job sends it to one
* of them and if it doesnt get a confirmation within a few seconds (getSendTimeout()),
* it tries the next, continuing on until a confirmation is received, the full
* timeout has been reached (60 seconds, or the ms defined in the client's or
* router's "clientMessageTimeout" option).
*
* After sending through all of the leases without success, if there's still
* time left it fails the leaseSet itself, does a new search for that leaseSet,
* and continues sending down any newly found leases.
*
*/
public class OutboundClientMessageJob extends JobImpl {
private Log _log;
private OutboundClientMessageStatus _status;
private NextStepJob _nextStep;
private LookupLeaseSetFailedJob _lookupLeaseSetFailed;
private long _overallExpiration;
private boolean _shouldBundle;
/**
* final timeout (in milliseconds) that the outbound message will fail in.
* This can be overridden in the router.config or the client's session config
* (the client's session config takes precedence)
*/
public final static String OVERALL_TIMEOUT_MS_PARAM = "clientMessageTimeout";
private final static long OVERALL_TIMEOUT_MS_DEFAULT = 60*1000;
/** how long for each send do we allow before going on to the next? */
private final static long DEFAULT_SEND_PARTIAL_TIMEOUT = 50*1000;
private static final String PROP_SEND_PARTIAL_TIMEOUT = "router.clientPartialSendTimeout";
/** priority of messages, that might get honored some day... */
private final static int SEND_PRIORITY = 500;
/** dont search for the lease more than 6 times */
private final static int MAX_LEASE_LOOKUPS = 6;
/**
* If the client's config specifies shouldBundleReplyInfo=true, messages sent from
* that client to any peers will probabalistically include the sending destination's
* current LeaseSet (allowing the recipient to reply without having to do a full
* netDb lookup). This should improve performance during the initial negotiations,
* but is not necessary for communication that isn't bidirectional.
*
*/
public static final String BUNDLE_REPLY_LEASESET = "shouldBundleReplyInfo";
/**
* Allow the override of the frequency of bundling the reply info in with a message.
* The client app can specify bundleReplyInfoProbability=80 (for instance) and that
* will cause the router to include the sender's leaseSet with 80% of the messages
* sent to the peer.
*
*/
public static final String BUNDLE_PROBABILITY = "bundleReplyInfoProbability";
/**
* How often do messages include the reply leaseSet (out of every 100 tries).
* Including it each time is probably overkill, but who knows.
*/
private static final int BUNDLE_PROBABILITY_DEFAULT = 100;
/**
 * Send the sucker
 */
public OutboundClientMessageJob(RouterContext ctx, ClientMessage msg) {
    super(ctx);
    _log = ctx.logManager().getLog(OutboundClientMessageJob.class);
    // register the stats used to analyze client send success / failure / congestion
    ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
    ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
    ctx.statManager().createRateStat("client.sendAttemptAverage", "How many different tunnels do we have to try when sending a client message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
    ctx.statManager().createRateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    ctx.statManager().createRateStat("client.sendsPerFailure", "How many send attempts do we make when they all fail?", "ClientMessages", new long[] { 60*60*1000l, 24*60*60*1000l });
    ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    // overall timeout: the client's session config takes precedence over
    // router.config, falling back to OVERALL_TIMEOUT_MS_DEFAULT
    long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;
    String param = msg.getSenderConfig().getOptions().getProperty(OVERALL_TIMEOUT_MS_PARAM);
    if (param == null)
        param = ctx.router().getConfigSetting(OVERALL_TIMEOUT_MS_PARAM);
    if (param != null) {
        try {
            timeoutMs = Long.parseLong(param);
        } catch (NumberFormatException nfe) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Invalid client message timeout specified [" + param
                + "], defaulting to " + OVERALL_TIMEOUT_MS_DEFAULT, nfe);
            timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;
        }
    }
    _overallExpiration = timeoutMs + getContext().clock().now();
    _status = new OutboundClientMessageStatus(ctx, msg);
    _nextStep = new NextStepJob(getContext());
    _lookupLeaseSetFailed = new LookupLeaseSetFailedJob(getContext());
    _shouldBundle = getShouldBundle();
}
/**
 * How long to wait on each individual lease before moving on to the next,
 * honoring the router.clientPartialSendTimeout override when parseable.
 */
private long getSendTimeout() {
    String val = getContext().getProperty(PROP_SEND_PARTIAL_TIMEOUT, ""+DEFAULT_SEND_PARTIAL_TIMEOUT);
    long rv = DEFAULT_SEND_PARTIAL_TIMEOUT;
    try {
        rv = Long.parseLong(val);
    } catch (NumberFormatException nfe) {
        // unparseable override - fall through with the default
    }
    return rv;
}
/** Job name as displayed on the router console / job queue stats. */
public String getName() { return "Outbound client message"; }
/** Build the payload clove and kick off the leaseSet lookup for the target destination. */
public void runJob() {
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(getJobId() + ": Send outbound client message job beginning");
    buildClove();
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(getJobId() + ": Clove built");
    Hash target = _status.getTo().calculateHash();
    long remainingMs = _overallExpiration - getContext().clock().now();
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(getJobId() + ": Send outbound client message - sending off leaseSet lookup job");
    _status.incrementLookups();
    getContext().netDb().lookupLeaseSet(target, _nextStep, _lookupLeaseSetFailed, remainingMs);
}
/**
 * Continue on sending through the next tunnel
 */
private void sendNext() {
    if (_log.shouldLog(Log.DEBUG)) {
        _log.debug(getJobId() + ": sendNext() called with " + _status.getNumSent() + " already sent");
    }
    // bail if an earlier attempt already resolved the send either way
    if (_status.getSuccess()) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": sendNext() - already successful!");
        return;
    }
    if (_status.getFailure()) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": sendNext() - already failed!");
        return;
    }
    long now = getContext().clock().now();
    if (now >= _overallExpiration) {
        if (_log.shouldLog(Log.WARN))
            _log.warn(getJobId() + ": sendNext() - Expired");
        dieFatal();
        return;
    }
    Lease nextLease = getNextLease();
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(getJobId() + ": Send outbound client message - next lease found for ["
        + _status.getTo().calculateHash().toBase64() + "] - "
        + nextLease);
    if (nextLease == null) {
        // every lease has been tried; drop the stale leaseSet and refetch it,
        // up to MAX_LEASE_LOOKUPS times, before giving up entirely
        if (_log.shouldLog(Log.WARN))
            _log.warn(getJobId() + ": No more leases, and we still haven't heard back from the peer"
            + ", refetching the leaseSet to try again");
        LeaseSet ls = _status.getLeaseSet();
        _status.setLeaseSet(null);
        long remainingMs = _overallExpiration - getContext().clock().now();
        if (_status.getNumLookups() < MAX_LEASE_LOOKUPS) {
            _status.incrementLookups();
            Hash to = _status.getMessage().getDestination().calculateHash();
            _status.clearAlreadySent(); // so we can send down old tunnels again
            getContext().netDb().fail(to); // so we don't just fetch what we have
            getContext().netDb().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, remainingMs);
            // fall back on the old leaseSet if the search drags on too long
            if (ls != null)
                getContext().jobQueue().addJob(new ShortCircuitSearchJob(getContext(), ls));
            return;
        } else {
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": sendNext() - max # lease lookups exceeded! "
                + _status.getNumLookups());
            dieFatal();
            return;
        }
    }
    // we have a lease to try - fire off the actual send
    getContext().jobQueue().addJob(new SendJob(getContext(), nextLease));
}
private static final long MAX_SEARCH_INTERVAL = 10*1000;
/**
 * If the netDb refetch isn't going well, lets fall back on the old leaseSet
 * anyway
 *
 */
private class ShortCircuitSearchJob extends JobImpl {
    private LeaseSet _ls;   // the leaseSet we had before the refetch began
    public ShortCircuitSearchJob(RouterContext enclosingContext, LeaseSet ls) {
        super(enclosingContext);
        _ls = ls;
        // give the real search MAX_SEARCH_INTERVAL ms to succeed before firing
        ShortCircuitSearchJob.this.getTiming().setStartAfter(getContext().clock().now() + MAX_SEARCH_INTERVAL);
    }
    public String getName() { return "Short circuit search"; }
    /**
     * If the lookup still hasn't produced a leaseSet, record the timeout
     * stats and reinstate the old one so sends can proceed.
     */
    public void runJob() {
        LeaseSet ls = getContext().netDb().lookupLeaseSetLocally(_ls.getDestination().calculateHash());
        if (ls == null) {
            try {
                markTimeout();
                getContext().netDb().store(_ls.getDestination().calculateHash(), _ls);
            } catch (IllegalArgumentException iae) {
                // ignore - it expired anyway
            }
        }
    }
}
/**
 * fetch the next lease that we should try sending through, or null if there
 * are no remaining leases available (or there weren't any in the first place...).
 * This implements the logic to determine which lease should be next by picking a
 * random one that has been failing the least (e.g. if there are 3 leases in the leaseSet
 * and one has failed, the other two are randomly chosen as the 'next')
 *
 */
private Lease getNextLease() {
    LeaseSet ls = _status.getLeaseSet();
    if (ls == null) {
        // no cached leaseSet on the status - try the local netDb copy
        ls = getContext().netDb().lookupLeaseSetLocally(_status.getTo().calculateHash());
        if (ls == null) {
            if (_log.shouldLog(Log.INFO))
                _log.info(getJobId() + ": Lookup locally didn't find the leaseSet");
            return null;
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug(getJobId() + ": Lookup locally DID find the leaseSet");
        }
        _status.setLeaseSet(ls);
    }
    long now = getContext().clock().now();
    // get the possible leases: not yet expired and not already tried
    List leases = new ArrayList(4);
    for (int i = 0; i < ls.getLeaseCount(); i++) {
        Lease lease = ls.getLease(i);
        if (lease.isExpired(Router.CLOCK_FUDGE_FACTOR)) {
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": getNextLease() - expired lease! - " + lease);
            continue;
        }
        if (!_status.alreadySent(lease.getRouterIdentity().getHash(), lease.getTunnelId())) {
            leases.add(lease);
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug(getJobId() + ": getNextLease() - skipping lease we've already sent it down - "
                + lease);
        }
    }
    if (leases.size() <= 0) {
        if (_log.shouldLog(Log.INFO))
            _log.info(getJobId() + ": No leases found, since we've tried them all (so fail it and relookup)");
        return null;
    }
    // randomize the ordering (so leases with equal # of failures per next
    // sort are randomly ordered)
    Collections.shuffle(leases);
    // ordered by lease number of failures; colliding failure counts are
    // bumped to the next free key so every lease keeps a slot
    TreeMap orderedLeases = new TreeMap();
    for (Iterator iter = leases.iterator(); iter.hasNext(); ) {
        Lease lease = (Lease)iter.next();
        long id = lease.getNumFailure();
        while (orderedLeases.containsKey(new Long(id)))
            id++;
        orderedLeases.put(new Long(id), lease);
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": ranking lease we havent sent it down as " + id);
    }
    // the least-failing (randomly tie-broken) lease wins
    return (Lease)orderedLeases.get(orderedLeases.firstKey());
}
/**
 * Should the sender's current leaseSet be bundled along with this message?
 * Honors the client's shouldBundleReplyInfo option (default true) and the
 * bundleReplyInfoProbability percentage override.
 */
private boolean getShouldBundle() {
    Properties opts = _status.getMessage().getSenderConfig().getOptions();
    if (!"true".equals(opts.getProperty(BUNDLE_REPLY_LEASESET, "true")))
        return false;
    int probability = BUNDLE_PROBABILITY_DEFAULT;
    String override = opts.getProperty(BUNDLE_PROBABILITY);
    try {
        if (override != null)
            probability = Integer.parseInt(override);
    } catch (NumberFormatException nfe) {
        if (_log.shouldLog(Log.WARN))
            _log.warn(getJobId() + ": Bundle leaseSet probability overridden incorrectly ["
                      + override + "]", nfe);
    }
    return probability >= getContext().random().nextInt(100);
}
/**
 * Send the message to the specified tunnel by creating a new garlic message containing
 * the (already created) payload clove as well as a new delivery status message. This garlic
 * message is sent out one of our tunnels, destined for the lease (tunnel+router) specified, and the delivery
 * status message is targetting one of our free inbound tunnels as well. We use a new
 * reply selector to keep an eye out for that delivery status message's token
 *
 */
private void send(Lease lease) {
    // random token identifies the delivery status ack we expect back
    long token = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
    PublicKey key = _status.getLeaseSet().getEncryptionKey();
    SessionKey sessKey = new SessionKey();
    Set tags = new HashSet();
    LeaseSet replyLeaseSet = null;
    if (_shouldBundle) {
        // include our own leaseSet so the peer can reply without a netDb lookup
        replyLeaseSet = getContext().netDb().lookupLeaseSetLocally(_status.getFrom().calculateHash());
    }
    GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(getContext(), token,
                                                                           _overallExpiration, key,
                                                                           _status.getClove(),
                                                                           _status.getTo(), sessKey,
                                                                           tags, true, replyLeaseSet);
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(getJobId() + ": send(lease) - token expected " + token);
    // record the attempt so getNextLease() won't pick this lease again
    _status.sent(lease.getRouterIdentity().getHash(), lease.getTunnelId());
    SendSuccessJob onReply = new SendSuccessJob(getContext(), lease, sessKey, tags);
    SendTimeoutJob onFail = new SendTimeoutJob(getContext(), lease);
    ReplySelector selector = new ReplySelector(token);
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(getJobId() + ": Placing GarlicMessage into the new tunnel message bound for "
        + lease.getTunnelId() + " on "
        + lease.getRouterIdentity().getHash().toBase64());
    TunnelId outTunnelId = selectOutboundTunnel();
    if (outTunnelId != null) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": Sending tunnel message out " + outTunnelId + " to "
            + lease.getTunnelId() + " on "
            + lease.getRouterIdentity().getHash().toBase64());
        SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId,
                                                          lease.getRouterIdentity().getHash(),
                                                          lease.getTunnelId(), null, onReply,
                                                          onFail, selector, getSendTimeout(),
                                                          SEND_PRIORITY);
        getContext().jobQueue().addJob(j);
    } else {
        // no outbound tunnels at all - nothing we can do but fail the send
        if (_log.shouldLog(Log.ERROR))
            _log.error(getJobId() + ": Could not find any outbound tunnels to send the payload through... wtf?");
        dieFatal();
    }
}
/**
 * Pick an arbitrary outbound tunnel to send the message through, or null if
 * there aren't any around
 *
 */
private TunnelId selectOutboundTunnel() {
    TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
    crit.setMaximumTunnelsRequired(1);
    crit.setMinimumTunnelsRequired(1);
    List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);
    // guard against a null return as well as an empty list (the equivalent
    // helper in MessageHandler, selectOutboundTunnelId, already checks null)
    if ( (tunnelIds == null) || (tunnelIds.size() <= 0) )
        return null;
    else
        return (TunnelId)tunnelIds.get(0);
}
/**
 * give up the ghost, this message just aint going through. tell the client to fuck off.
 *
 * this is safe to call multiple times (only tells the client once)
 */
private void dieFatal() {
    if (_status.getSuccess()) return;
    boolean alreadyFailed = _status.failed();
    long sendTime = getContext().clock().now() - _status.getStart();
    ClientMessage msg = _status.getMessage();
    if (alreadyFailed) {
        // another path already reported the failure - don't tell the client twice
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": dieFatal() - already failed sending " + msg.getMessageId()
                       + ", no need to do it again", new Exception("Duplicate death?"));
        return;
    }
    if (_log.shouldLog(Log.WARN))
        _log.warn(getJobId() + ": Failed to send the message " + msg.getMessageId() + " after "
                  + _status.getNumSent() + " sends and " + _status.getNumLookups()
                  + " lookups (and " + sendTime + "ms)",
                  new Exception("Message send failure"));
    // notify the client and record the failure stats
    getContext().messageHistory().sendPayloadMessage(msg.getMessageId().getMessageId(), false, sendTime);
    getContext().clientManager().messageDeliveryStatusUpdate(msg.getFromDestination(), msg.getMessageId(), false);
    getContext().statManager().updateFrequency("client.sendMessageFailFrequency");
    getContext().statManager().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime);
    getContext().statManager().addRateData("client.sendsPerFailure", _status.getNumSent(), sendTime);
}
/** build the payload clove that will be used for all of the messages, placing the clove in the status structure */
private void buildClove() {
    // instructions: deliver straight to the target destination, no delay, no extra encryption
    DeliveryInstructions instr = new DeliveryInstructions();
    instr.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_DESTINATION);
    instr.setDestination(_status.getTo().calculateHash());
    instr.setDelayRequested(false);
    instr.setDelaySeconds(0);
    instr.setEncrypted(false);

    // wrap the client's (already encrypted) payload in a DataMessage
    DataMessage dataMsg = new DataMessage(getContext());
    dataMsg.setData(_status.getMessage().getPayload().getEncryptedData());

    PayloadGarlicConfig config = new PayloadGarlicConfig();
    config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
    config.setDeliveryInstructions(instr);
    config.setExpiration(_overallExpiration);
    config.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
    config.setPayload(dataMsg);
    config.setRecipientPublicKey(null);
    config.setRequestAck(false);
    _status.setClove(config);
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(getJobId() + ": Built payload clove with id " + config.getId());
}
/**
 * A send or netDb lookup timed out, so record the router's current
 * congestion indicators to help determine why.
 */
private void markTimeout() {
    // snapshot the throttle's view of the world at the moment of failure
    long tunnelLagNow = getContext().throttle().getTunnelLag();
    long msgDelayNow = getContext().throttle().getMessageDelay();
    long inboundRateDelta = (long)getContext().throttle().getInboundRateDelta();

    getContext().statManager().addRateData("client.timeoutCongestionTunnel", tunnelLagNow, 1);
    getContext().statManager().addRateData("client.timeoutCongestionMessage", msgDelayNow, 1);
    getContext().statManager().addRateData("client.timeoutCongestionInbound", inboundRateDelta, 1);
}
/**
 * Watch for the delivery status message token that was sent down the
 * tunnels along with this message.
 *
 */
private class ReplySelector implements MessageSelector {
    private long _token;

    public ReplySelector(long token) {
        _token = token;
    }

    /** a single matching ack is all we need */
    public boolean continueMatching() { return false; }

    public long getExpiration() { return _overallExpiration; }

    public boolean isMatch(I2NPMessage inMsg) {
        // only a DeliveryStatusMessage carrying our pending token counts
        if (inMsg.getType() != DeliveryStatusMessage.MESSAGE_TYPE)
            return false;
        return ((DeliveryStatusMessage)inMsg).getMessageId() == _token;
    }
}
/**
 * Queued by the netDb lookup success and by the send timeout to get us to
 * try the next lease.
 */
private class NextStepJob extends JobImpl {
    public NextStepJob(RouterContext enclosingContext) {
        super(enclosingContext);
    }

    public String getName() {
        return "Process next step for outbound client message";
    }

    public void runJob() {
        sendNext();
    }
}
/**
 * We couldn't even find the leaseSet, but try again (or die if we've
 * already tried too hard).
 *
 */
private class LookupLeaseSetFailedJob extends JobImpl {
    public LookupLeaseSetFailedJob(RouterContext enclosingContext) {
        super(enclosingContext);
    }

    public String getName() {
        return "Lookup for outbound client message failed";
    }

    public void runJob() {
        // record why we may have failed, then move on to the next attempt
        markTimeout();
        sendNext();
    }
}
/** Send the message out through the given lease. */
private class SendJob extends JobImpl {
    private Lease _target;

    public SendJob(RouterContext enclosingContext, Lease lease) {
        super(enclosingContext);
        _target = lease;
    }

    public String getName() {
        return "Send outbound client message through the lease";
    }

    public void runJob() {
        send(_target);
    }
}
/**
 * Called after we get a confirmation that the message was delivered safely
 * (hoo-ray!)
 *
 */
private class SendSuccessJob extends JobImpl implements ReplyJob {
    private Lease _lease;
    // session key the garlic was encrypted with, if any (null when tags were used)
    private SessionKey _key;
    // session tags bundled along with the message, if any
    private Set _tags;

    /**
     * Create a new success job that will be fired when the message encrypted with
     * the given session key and bearing the specified tags are confirmed delivered.
     *
     */
    public SendSuccessJob(RouterContext enclosingContext, Lease lease, SessionKey key, Set tags) {
        super(enclosingContext);
        _lease = lease;
        _key = key;
        _tags = tags;
    }

    public String getName() { return "Send client message successful to a lease"; }

    public void runJob() {
        long sendTime = getContext().clock().now() - _status.getStart();
        // presumably success() marks the status successful and returns the prior state - TODO confirm
        boolean alreadySuccessful = _status.success();
        MessageId msgId = _status.getMessage().getMessageId();
        if (_log.shouldLog(Log.INFO))
            _log.info(OutboundClientMessageJob.this.getJobId()
                      + ": SUCCESS! msg " + msgId
                      + " sent after " + sendTime + "ms after "
                      + _status.getNumLookups() + " lookups and "
                      + _status.getNumSent() + " sends");

        // the ack proves the tags arrived, so release them for future
        // garlic messages to this leaseSet (even on a duplicate success)
        if ( (_key != null) && (_tags != null) && (_tags.size() > 0) ) {
            LeaseSet ls = _status.getLeaseSet();
            if (ls != null)
                getContext().sessionKeyManager().tagsDelivered(ls.getEncryptionKey(),
                                                               _key, _tags);
        }

        if (alreadySuccessful) {
            // an ack from another tunnel got here first; the client was already told
            if (_log.shouldLog(Log.DEBUG))
                _log.debug(OutboundClientMessageJob.this.getJobId()
                           + ": Success is a duplicate for " + _status.getMessage().getMessageId()
                           + ", dont notify again...");
            return;
        }
        long dataMsgId = _status.getClove().getId();
        getContext().messageHistory().sendPayloadMessage(dataMsgId, true, sendTime);
        getContext().clientManager().messageDeliveryStatusUpdate(_status.getFrom(), msgId, true);
        // track the lease's success count so future lease selection can prefer it
        _lease.setNumSuccess(_lease.getNumSuccess()+1);

        getContext().statManager().addRateData("client.sendAckTime", sendTime, 0);
        getContext().statManager().addRateData("client.sendMessageSize", _status.getMessage().getPayload().getSize(), sendTime);
        getContext().statManager().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime);
    }

    /** we don't care about the content of the DeliveryStatusMessage, just that it arrived */
    public void setMessage(I2NPMessage msg) {}
}
/**
 * Fired after the basic timeout for sending through the given tunnel has been reached.
 * We'll accept successes later, but won't expect them
 *
 */
private class SendTimeoutJob extends JobImpl {
    private Lease _target;

    public SendTimeoutJob(RouterContext enclosingContext, Lease lease) {
        super(enclosingContext);
        _target = lease;
    }

    public String getName() {
        return "Send client message timed out through a lease";
    }

    public void runJob() {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(OutboundClientMessageJob.this.getJobId()
                       + ": Soft timeout through the lease " + _target);
        // record congestion stats and the lease's failure count, then retry
        markTimeout();
        _target.setNumFailure(_target.getNumFailure() + 1);
        sendNext();
    }
}
}

View File

@ -1,623 +0,0 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Date;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
import net.i2p.data.Payload;
import net.i2p.data.RouterIdentity;
import net.i2p.data.SessionKey;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelMessage;
import net.i2p.data.i2np.TunnelVerificationStructure;
import net.i2p.router.ClientMessage;
import net.i2p.router.InNetMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageReceptionInfo;
import net.i2p.router.MessageSelector;
import net.i2p.router.OutNetMessage;
import net.i2p.router.ReplyJob;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
 * Send a message down a tunnel that we are the gateway for.  Depending on our
 * position in the tunnel (gateway, participant, or gateway+endpoint), the
 * message is wrapped, verified-and-forwarded, or the endpoint instructions are
 * honored locally.
 *
 */
public class SendTunnelMessageJob extends JobImpl {
    private Log _log;
    // the message to push down the tunnel
    private I2NPMessage _message;
    // router the endpoint should forward to, or null for local delivery at the endpoint
    private Hash _destRouter;
    // tunnel we are sending through
    private TunnelId _tunnelId;
    // tunnel on _destRouter to target, or null to deliver to the router itself
    private TunnelId _targetTunnelId;
    private Job _onSend;
    private ReplyJob _onReply;
    private Job _onFailure;
    private MessageSelector _selector;
    private long _timeout;
    private long _expiration;
    private int _priority;
    // debug marker exposed via getName() to show where this job is in its lifecycle
    private int _state;

    public SendTunnelMessageJob(RouterContext ctx, I2NPMessage msg, TunnelId tunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) {
        this(ctx, msg, tunnelId, null, null, onSend, onReply, onFailure, selector, timeoutMs, priority);
    }

    public SendTunnelMessageJob(RouterContext ctx, I2NPMessage msg, TunnelId tunnelId, Hash targetRouter, TunnelId targetTunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) {
        super(ctx);
        _state = 0;
        _log = ctx.logManager().getLog(SendTunnelMessageJob.class);
        if (msg == null)
            throw new IllegalArgumentException("wtf, null message? sod off");
        if (tunnelId == null)
            throw new IllegalArgumentException("wtf, no tunnelId? nuh uh");
        _state = 1;
        _message = msg;
        _destRouter = targetRouter;
        _tunnelId = tunnelId;
        _targetTunnelId = targetTunnelId;
        _onSend = onSend;
        _onReply = onReply;
        _onFailure = onFailure;
        _selector = selector;
        _timeout = timeoutMs;
        _priority = priority;

        if (timeoutMs < 50*1000) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Sending tunnel message to expire in " + timeoutMs
                          + "ms containing " + msg.getUniqueId() + " (a "
                          + msg.getClass().getName() + ")",
                          new Exception("SendTunnel from"));
        }
        //_log.info("Send tunnel message " + msg.getClass().getName() + " to " + _destRouter + " over " + _tunnelId + " targetting tunnel " + _targetTunnelId, new Exception("SendTunnel from"));
        // enforce a 5s floor on the expiration so the message has a fighting chance
        if (timeoutMs < 5*1000) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Very little time given [" + timeoutMs + "], resetting to 5s", new Exception("stingy bastard"));
            _expiration = getContext().clock().now() + 5*1000;
        } else {
            _expiration = getContext().clock().now() + timeoutMs;
        }
        _state = 2;
    }

    public void runJob() {
        _state = 3;
        TunnelInfo info = getContext().tunnelManager().getTunnelInfo(_tunnelId);
        if (info == null) {
            // we don't manage this tunnel - it must belong to another router,
            // so forward the message to that tunnel's gateway
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Message for unknown tunnel [" + _tunnelId
                           + "] received, forward to " + _destRouter);
            if ( (_tunnelId == null) || (_destRouter == null) ) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Someone br0ke us.  where is this message supposed to go again?",
                               getAddedBy());
                if (_onFailure != null)
                    getContext().jobQueue().addJob(_onFailure);
                return;
            } else {
                _state = 4;
                forwardToGateway();
                _state = 0;
                return;
            }
        }

        info.messageProcessed(_message.getMessageSize());

        // dispatch based on our role in the tunnel
        if (isEndpoint(info)) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Tunnel message where we're both the gateway and the endpoint - honor instructions");
            _state = 5;
            honorInstructions(info);
            _state = 0;
            return;
        } else if (isGateway(info)) {
            _state = 6;
            handleAsGateway(info);
            _state = 0;
            return;
        } else {
            _state = 7;
            handleAsParticipant(info);
            _state = 0;
            return;
        }
    }

    /**
     * Forward this job's message to the gateway of the tunnel requested
     *
     */
    private void forwardToGateway() {
        _state = 8;
        TunnelMessage msg = new TunnelMessage(getContext());
        msg.setData(_message.toByteArray());
        msg.setTunnelId(_tunnelId);
        msg.setMessageExpiration(new Date(_expiration));
        getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg,
                                                                _destRouter, _onSend,
                                                                _onReply, _onFailure,
                                                                _selector,
                                                                (int)(_expiration-getContext().clock().now()),
                                                                _priority));

        // record the wrapping in the message history for debugging
        String bodyType = _message.getClass().getName();
        getContext().messageHistory().wrap(bodyType, _message.getUniqueId(),
                                           TunnelMessage.class.getName(), msg.getUniqueId());
        _state = 9;
        return;
    }

    /**
     * We are the gateway for the tunnel this message is bound to,
     * so wrap it accordingly and send it on its way.
     *
     */
    private void handleAsGateway(TunnelInfo info) {
        _state = 10;
        // since we are the gateway, we don't need to verify the data structures
        TunnelInfo us = getUs(info);
        if (us == null) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("We are not participating in this /known/ tunnel - was the router reset?");
            if (_onFailure != null)
                getContext().jobQueue().addJob(_onFailure);
            _state = 11;
        } else {
            _state = 12;
            // we're the gateway, so sign, encrypt, and forward to info.getNextHop()
            TunnelMessage msg = prepareMessage(info);
            _state = 66;
            if (msg == null) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("wtf, unable to prepare a tunnel message to the next hop, when we're the gateway and hops remain?  tunnel: " + info);
                if (_onFailure != null)
                    getContext().jobQueue().addJob(_onFailure);
                _state = 13;
                return;
            }
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Tunnel message created: " + msg + " out of encrypted message: "
                           + _message);
            long now = getContext().clock().now();
            // drop it if it already expired (beyond clock fudge), warn if close
            if (_expiration < now - Router.CLOCK_FUDGE_FACTOR) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("We are the gateway to " + info.getTunnelId().getTunnelId()
                               + " and the message " + msg.getUniqueId() + " is valid, but it has timed out ("
                               + (now - _expiration) + "ms ago)");
                if (_onFailure != null)
                    getContext().jobQueue().addJob(_onFailure);
                _state = 14;
                return;
            } else if (_expiration < now + 15*1000) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Adding a tunnel message that will expire shortly ["
                              + new Date(_expiration) + "]", getAddedBy());
            }
            _state = 67;
            msg.setMessageExpiration(new Date(_expiration));
            _state = 68;
            Job j = new SendMessageDirectJob(getContext(), msg,
                                             info.getNextHop(), _onSend,
                                             _onReply, _onFailure,
                                             _selector,
                                             (int)(_expiration - getContext().clock().now()),
                                             _priority);
            _state = 69;
            getContext().jobQueue().addJob(j);
            _state = 15;
        }
    }

    /**
     * We are the participant in the tunnel, so verify the signature / data and
     * forward it to the next hop.
     *
     */
    private void handleAsParticipant(TunnelInfo info) {
        _state = 16;
        // SendTunnelMessageJob shouldn't be used for participants!
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("SendTunnelMessageJob for a participant... ", getAddedBy());

        if (!(_message instanceof TunnelMessage)) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Cannot inject non-tunnel messages as a participant!" + _message, getAddedBy());
            if (_onFailure != null)
                getContext().jobQueue().addJob(_onFailure);
            _state = 17;
            return;
        }

        TunnelMessage msg = (TunnelMessage)_message;

        TunnelVerificationStructure struct = msg.getVerificationStructure();
        if ( (info.getVerificationKey() == null) || (info.getVerificationKey().getKey() == null) ) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("No verification key for the participant?  tunnel: " + info, getAddedBy());
            if (_onFailure != null)
                getContext().jobQueue().addJob(_onFailure);
            _state = 18;
            return;
        }

        // verify the gateway's signature over the payload before forwarding
        boolean ok = struct.verifySignature(getContext(), info.getVerificationKey().getKey());
        _state = 19;
        if (!ok) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Failed tunnel verification!  Spoofing / tagging attack?  " + _message, getAddedBy());
            if (_onFailure != null)
                getContext().jobQueue().addJob(_onFailure);
            _state = 20;
            return;
        } else {
            _state = 21;
            if (info.getNextHop() != null) {
                _state = 22;
                if (_log.shouldLog(Log.INFO))
                    _log.info("Message for tunnel " + info.getTunnelId().getTunnelId() + " received where we're not the gateway and there are remaining hops, so forward it on to "
                              + info.getNextHop().toBase64() + " via SendMessageDirectJob");
                SendMessageDirectJob j = new SendMessageDirectJob(getContext(), msg, info.getNextHop(), _onSend,
                                                                  null, _onFailure, null,
                                                                  (int)(_message.getMessageExpiration().getTime() - getContext().clock().now()),
                                                                  _priority);
                getContext().jobQueue().addJob(j);
                _state = 23;
                return;
            } else {
                _state = 24;
                // a participant always has a next hop; only the endpoint does not
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Should not be reached - participant, but no more hops?!");
                if (_onFailure != null)
                    getContext().jobQueue().addJob(_onFailure);
                _state = 25;
                return;
            }
        }
    }

    /** find our place in the tunnel (the last TunnelInfo hop whose router hash is ours) */
    private TunnelInfo getUs(TunnelInfo info) {
        _state = 26;
        Hash us = getContext().routerHash();
        TunnelInfo lastUs = null;
        while (info != null) {
            if (us.equals(info.getThisHop()))
                lastUs = info;
            info = info.getNextHopInfo();
            _state = 28;
        }
        _state = 27;
        return lastUs;
    }

    /** are we the endpoint for the tunnel? */
    private boolean isEndpoint(TunnelInfo info) {
        TunnelInfo us = getUs(info);
        _state = 29;
        if (us == null) return false;
        return (us.getNextHop() == null);
    }

    /** are we the gateway for the tunnel? */
    private boolean isGateway(TunnelInfo info) {
        TunnelInfo us = getUs(info);
        _state = 30;
        if (us == null) return false;
        return (us.getSigningKey() != null); // only the gateway can sign
    }

    // minimum padded sizes for the encrypted instructions / payload
    private static final int INSTRUCTIONS_PADDING = 32;
    private static final int PAYLOAD_PADDING = 32;

    /**
     * Build the tunnel message with appropriate instructions for the
     * tunnel endpoint, then encrypt and sign it.
     *
     * @return the prepared TunnelMessage, or null if the tunnel info was
     *         missing or had no encryption key
     */
    private TunnelMessage prepareMessage(TunnelInfo info) {
        _state = 31;
        TunnelMessage msg = new TunnelMessage(getContext());

        // fresh session key for the payload; the endpoint learns it from the
        // (tunnel-key encrypted) delivery instructions
        SessionKey key = getContext().keyGenerator().generateSessionKey();

        DeliveryInstructions instructions = new DeliveryInstructions();
        instructions.setDelayRequested(false);
        instructions.setEncrypted(true);
        instructions.setEncryptionKey(key);

        // if we aren't told where to send it, have it be processed locally at the endpoint
        // but if we are, have the endpoint forward it appropriately.
        // note that this algorithm does not currently support instructing the endpoint
        // to send to a Destination
        if (_destRouter != null) {
            _state = 32;
            instructions.setRouter(_destRouter);
            if (_targetTunnelId != null) {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Instructions target tunnel " + _targetTunnelId
                               + " on router " + _destRouter.calculateHash());
                instructions.setTunnelId(_targetTunnelId);
                instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL);
            } else {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Instructions target router " + _destRouter.toBase64());
                instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER);
            }
        } else {
            _state = 33;
            if (_message instanceof DataMessage) {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Instructions are for local message delivery at the endpoint with a DataMessage to be sent to a Destination");
                instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
            } else {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Instructions are for local delivery at the endpoint targetting the now-local router");
                instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
            }
        }

        if (info == null) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Tunnel info is null to send message " + _message);
            _state = 34;
            return null;
        } else if ( (info.getEncryptionKey() == null) || (info.getEncryptionKey().getKey() == null) ) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Tunnel encryption key is null when we're the gateway?!  info: " + info);
            _state = 35;
            return null;
        }

        _state = 36;
        // instructions are encrypted to the tunnel key, the payload to the fresh session key
        byte encryptedInstructions[] = encrypt(instructions, info.getEncryptionKey().getKey(), INSTRUCTIONS_PADDING);
        byte encryptedMessage[] = encrypt(_message, key, PAYLOAD_PADDING);
        _state = 37;
        TunnelVerificationStructure verification = createVerificationStructure(encryptedMessage, info);
        _state = 38;

        String bodyType = _message.getClass().getName();
        getContext().messageHistory().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());

        _state = 39;
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Tunnel message prepared: instructions = " + instructions);

        msg.setData(encryptedMessage);
        msg.setEncryptedDeliveryInstructions(encryptedInstructions);
        msg.setTunnelId(_tunnelId);
        msg.setVerificationStructure(verification);
        _state = 40;
        return msg;
    }

    /**
     * Create and sign the verification structure, using the tunnel's signing key
     *
     */
    private TunnelVerificationStructure createVerificationStructure(byte encryptedMessage[], TunnelInfo info) {
        _state = 41;
        TunnelVerificationStructure struct = new TunnelVerificationStructure();
        struct.setMessageHash(getContext().sha().calculateHash(encryptedMessage));
        struct.sign(getContext(), info.getSigningKey().getKey());
        _state = 42;
        return struct;
    }

    /**
     * encrypt the structure (the message or instructions)
     *
     * @param paddedSize minimum size to pad to
     * @return the encrypted bytes, or null on serialization failure
     */
    private byte[] encrypt(DataStructure struct, SessionKey key, int paddedSize) {
        _state = 43;
        try {
            ByteArrayOutputStream baos = new ByteArrayOutputStream(paddedSize);
            byte data[] = struct.toByteArray();
            baos.write(data);

            // the IV is derived deterministically from the key (first 16 bytes of its SHA hash)
            byte iv[] = new byte[16];
            Hash h = getContext().sha().calculateHash(key.getData());
            System.arraycopy(h.getData(), 0, iv, 0, iv.length);
            _state = 44;
            return getContext().aes().safeEncrypt(baos.toByteArray(), key, iv, paddedSize);
        } catch (IOException ioe) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Error writing out data to encrypt", ioe);
        }
        _state = 45;
        return null;
    }

    /**
     * We are both the endpoint and gateway for the tunnel, so honor
     * what was requested of us (processing the message locally,
     * forwarding to a router, forwarding to a tunnel, etc)
     *
     */
    private void honorInstructions(TunnelInfo info) {
        _state = 46;
        if (_selector != null)
            createFakeOutNetMessage();

        if (_onSend != null) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Firing onSend as we're honoring the instructions");
            getContext().jobQueue().addJob(_onSend);
            _state = 47;
        }

        // since we are the gateway, we don't need to decrypt the delivery instructions or the payload
        RouterIdentity ident = getContext().router().getRouterInfo().getIdentity();

        if (_destRouter != null) {
            _state = 48;
            honorSendRemote(info, ident);
            _state = 49;
        } else {
            _state = 50;
            honorSendLocal(info, ident);
            _state = 51;
        }
    }

    /**
     * We are the gateway and endpoint and we have been asked to forward the
     * message to a remote location (either a tunnel or a router).
     *
     */
    private void honorSendRemote(TunnelInfo info, RouterIdentity ident) {
        _state = 52;
        I2NPMessage msg = null;
        if (_targetTunnelId != null) {
            // wrap it in a TunnelMessage targetting the remote tunnel; no
            // instructions or verification, as we're not that tunnel's gateway
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Forward " + _message.getClass().getName()
                           + " message off to remote tunnel "
                           + _targetTunnelId.getTunnelId() + " on router "
                           + _destRouter.toBase64());
            TunnelMessage tmsg = new TunnelMessage(getContext());
            tmsg.setEncryptedDeliveryInstructions(null);
            tmsg.setTunnelId(_targetTunnelId);
            tmsg.setVerificationStructure(null);
            byte data[] = _message.toByteArray();
            tmsg.setData(data);
            msg = tmsg;
            _state = 53;
        } else {
            _state = 54;
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Forward " + _message.getClass().getName()
                           + " message off to remote router " + _destRouter.toBase64());
            msg = _message;
        }
        long now = getContext().clock().now();
        //if (_expiration < now) {
        //_expiration = now + Router.CLOCK_FUDGE_FACTOR;
        //_log.info("Fudging the message send so it expires in the fudge factor...");
        //}

        long timeLeft = _expiration - now;
        if (timeLeft < 10*1000) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Why are we trying to send a " + _message.getClass().getName()
                          + " message with " + (_expiration-now) + "ms left?", getAddedBy());
            // give up entirely once we're past the expiration plus the clock fudge
            if (timeLeft + Router.CLOCK_FUDGE_FACTOR < 0) {
                _log.error("Timed out honoring request to send a " + _message.getClass().getName()
                           + " message remotely [" + _message.getUniqueId() + "] expired "
                           + (0-timeLeft) + "ms ago");
                return;
            }
        }

        _state = 55;
        String bodyType = _message.getClass().getName();
        getContext().messageHistory().wrap(bodyType, _message.getUniqueId(),
                                           TunnelMessage.class.getName(), msg.getUniqueId());

        _state = 56;
        // don't specify a selector, since createFakeOutNetMessage already does that
        SendMessageDirectJob j = new SendMessageDirectJob(getContext(), msg, _destRouter,
                                                          _onSend, _onReply, _onFailure,
                                                          null, (int)(timeLeft),
                                                          _priority);
        _state = 57;
        getContext().jobQueue().addJob(j);
    }

    /**
     * We are the gateway and endpoint, and the instructions say to forward the
     * message to, uh, us.  The message may be a normal network message or they
     * may be a client DataMessage.
     *
     */
    private void honorSendLocal(TunnelInfo info, RouterIdentity ident) {
        _state = 59;
        if ( (info.getDestination() == null) || !(_message instanceof DataMessage) ) {
            // its a network message targeting us...
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Destination is null or its not a DataMessage - pass it off to the InNetMessagePool");
            // NOTE(review): this _state value duplicates the one above (59 twice,
            // 58 unused) so the debug trace cannot distinguish the two points - confirm intended
            _state = 59;
            InNetMessage msg = new InNetMessage(getContext());
            msg.setFromRouter(ident);
            msg.setFromRouterHash(ident.getHash());
            msg.setMessage(_message);
            getContext().inNetMessagePool().add(msg);
            _state = 60;
        } else {
            _state = 61;
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Destination is not null and it is a DataMessage - pop it into the ClientMessagePool");
            DataMessage msg = (DataMessage)_message;
            // drop duplicates (replay protection) before handing to the client
            boolean valid = getContext().messageValidator().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime());
            if (!valid) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Duplicate data message received [" + msg.getUniqueId() + " expiring on " + msg.getMessageExpiration() + "]");
                getContext().messageHistory().droppedOtherMessage(msg);
                getContext().messageHistory().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(), "Duplicate");
                _state = 62;
                return;
            }

            Payload payload = new Payload();
            payload.setEncryptedData(msg.getData());

            MessageReceptionInfo receptionInfo = new MessageReceptionInfo();
            receptionInfo.setFromPeer(ident.getHash());
            receptionInfo.setFromTunnel(_tunnelId);

            ClientMessage clientMessage = new ClientMessage();
            clientMessage.setDestination(info.getDestination());
            clientMessage.setPayload(payload);
            clientMessage.setReceptionInfo(receptionInfo);
            getContext().clientMessagePool().add(clientMessage);
            getContext().messageHistory().receivePayloadMessage(msg.getUniqueId());
            _state = 63;
        }
    }

    private void createFakeOutNetMessage() {
        _state = 64;
        // now we create a fake outNetMessage to go onto the registry so we can select
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Registering a fake outNetMessage for the message tunneled locally since we have a selector");
        OutNetMessage outM = new OutNetMessage(getContext());
        outM.setExpiration(_expiration);
        outM.setMessage(_message);
        outM.setOnFailedReplyJob(_onFailure);
        outM.setOnFailedSendJob(_onFailure);
        outM.setOnReplyJob(_onReply);
        outM.setOnSendJob(_onSend);
        outM.setPriority(_priority);
        outM.setReplySelector(_selector);
        outM.setTarget(getContext().netDb().lookupRouterInfoLocally(_destRouter));
        getContext().messageRegistry().registerPending(outM);
        // we dont really need the data
        outM.discardData();
        _state = 65;
    }

    /** the _state suffix shows where in the lifecycle the job currently is, for debugging */
    public String getName() { return "Send Tunnel Message" + (_state == 0 ? "" : ""+_state); }
}

View File

@ -1,34 +0,0 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelMessage;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;
/**
 * HandlerJobBuilder that creates a HandleTunnelMessageJob for each
 * received TunnelMessage.
 *
 */
public class TunnelMessageHandler implements HandlerJobBuilder {
    private RouterContext _ctx;

    public TunnelMessageHandler(RouterContext context) {
        _ctx = context;
    }

    public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
        // the dispatcher guarantees receivedMessage is a TunnelMessage
        return new HandleTunnelMessageJob(_ctx, (TunnelMessage)receivedMessage, from, fromHash);
    }
}

View File

@ -0,0 +1,36 @@
package net.i2p.router.tunnel;
import net.i2p.I2PAppContext;
import net.i2p.util.DecayingBloomFilter;
/**
 * Manage the IV validation for all of the router's tunnels by way of a big
 * decaying bloom filter.
 *
 */
public class BloomFilterIVValidator implements IVValidator {
    private I2PAppContext _ctx;
    private DecayingBloomFilter _ivFilter;

    /**
     * After 2*halflife, an entry is completely forgotten from the bloom filter.
     * To avoid the issue of overlap within different tunnels, this is set
     * higher than it needs to be.
     *
     */
    private static final int HALFLIFE_MS = 10*60*1000;

    public BloomFilterIVValidator(I2PAppContext ctx, int KBps) {
        _ctx = ctx;
        // 16 byte entries (the IV size)
        _ivFilter = new DecayingBloomFilter(ctx, HALFLIFE_MS, 16);
        ctx.statManager().createRateStat("tunnel.duplicateIV", "Note that a duplicate IV was received", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
    }

    /** @return true if the IV is OK (not seen before), false if it is a duplicate */
    public boolean receiveIV(byte[] iv) {
        if (_ivFilter.add(iv)) {
            // already in the filter - a replay (or a bloom false positive)
            _ctx.statManager().addRateData("tunnel.duplicateIV", 1, 1);
            return false;
        }
        return true;
    }

    public void destroy() {
        _ivFilter.stopDecaying();
    }
}

View File

@ -0,0 +1,12 @@
package net.i2p.router.tunnel;
/**
 * IVValidator that accepts everything - no duplicate detection at all.
 */
class DummyValidator implements IVValidator {
    private static final DummyValidator _instance = new DummyValidator();

    /** stateless, so a single shared instance suffices */
    public static DummyValidator getInstance() {
        return _instance;
    }

    private DummyValidator() {
        // singleton - use getInstance()
    }

    public boolean receiveIV(byte[] iv) {
        return true; // never flag anything as a duplicate
    }
}

View File

@ -0,0 +1,25 @@
package net.i2p.router.tunnel;
import java.util.HashSet;
import net.i2p.data.ByteArray;
/**
 * IVValidator that remembers every IV it has ever seen - accurate, but
 * wastes lots of RAM since entries are never expired.
 */
class HashSetIVValidator implements IVValidator {
    private final HashSet _received;

    public HashSetIVValidator() {
        _received = new HashSet();
    }

    public boolean receiveIV(byte[] iv) {
        ByteArray wrapped = new ByteArray(iv);
        synchronized (_received) {
            // Set.add returns true only if the IV was not already present
            return _received.add(wrapped);
        }
    }
}

View File

@ -0,0 +1,59 @@
package net.i2p.router.tunnel;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.TunnelDataMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
/**
 * Receive the encrypted data at an inbound tunnel's gateway and forward it
 * to the next hop as a TunnelDataMessage.
 *
 */
public class InboundGatewayReceiver implements TunnelGateway.Receiver {
    private RouterContext _context;
    private HopConfig _config;
    // next hop's RouterInfo, cached after the first successful netDb lookup
    private RouterInfo _target;

    public InboundGatewayReceiver(RouterContext ctx, HopConfig cfg) {
        _context = ctx;
        _config = cfg;
    }

    public void receiveEncrypted(byte[] encrypted) {
        receiveEncrypted(encrypted, false);
    }

    /**
     * @param alreadySearched if true, don't kick off another netDb lookup
     *        when the next hop's RouterInfo still isn't known locally
     */
    public void receiveEncrypted(byte[] encrypted, boolean alreadySearched) {
        if (_target == null) {
            _target = _context.netDb().lookupRouterInfoLocally(_config.getSendTo());
            if (_target == null) {
                ReceiveJob j = null;
                if (!alreadySearched)
                    j = new ReceiveJob(encrypted);
                // NOTE(review): when alreadySearched is true both callbacks are
                // null, so the data is dropped after one failed lookup - confirm intended
                _context.netDb().lookupRouterInfo(_config.getSendTo(), j, j, 5*1000);
                return;
            }
        }

        // wrap the tunnel-encrypted data and ship it to the next hop
        TunnelDataMessage msg = new TunnelDataMessage(_context);
        msg.setData(encrypted);
        msg.setTunnelId(_config.getSendTunnel());

        OutNetMessage out = new OutNetMessage(_context);
        out.setMessage(msg);
        out.setTarget(_target);
        out.setExpiration(msg.getMessageExpiration());
        out.setPriority(400);
        _context.outNetMessagePool().add(out);
    }

    /** retry the send once the netDb lookup for the next hop completes (or times out) */
    private class ReceiveJob extends JobImpl {
        private byte[] _encrypted;
        public ReceiveJob(byte data[]) {
            super(_context);
            _encrypted = data;
        }
        public String getName() { return "lookup first hop"; }
        public void runJob() {
            // pass alreadySearched=true so a failed lookup can't loop forever
            receiveEncrypted(_encrypted, true);
        }
    }
}

View File

@ -0,0 +1,149 @@
package net.i2p.router.tunnel;
import java.util.Date;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.RouterInfo;
import net.i2p.data.Payload;
import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.data.i2np.GarlicClove;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.ClientMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.message.GarlicMessageReceiver;
import net.i2p.util.Log;
/**
* When a message arrives at the inbound tunnel endpoint, this distributor
* honors the instructions (safely)
*/
public class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
    private RouterContext _context;
    private Log _log;
    // hash of the local client destination this tunnel was built for, or null
    // for router (exploratory) tunnels
    private Hash _client;
    // parses/decrypts garlic messages and calls back into handleClove below
    private GarlicMessageReceiver _receiver;

    // NOTE(review): declared but never referenced in this class — presumably a
    // cap on distribution time mirroring OutboundMessageDistributor; confirm
    private static final int MAX_DISTRIBUTE_TIME = 10*1000;

    public InboundMessageDistributor(RouterContext ctx, Hash client) {
        _context = ctx;
        _client = client;
        _log = ctx.logManager().getLog(InboundMessageDistributor.class);
        _receiver = new GarlicMessageReceiver(ctx, this, client);
    }

    /** Distribute with no explicit follow-on tunnel (router or local delivery). */
    public void distribute(I2NPMessage msg, Hash target) {
        distribute(msg, target, null);
    }

    /**
     * Honor the delivery target for a message that arrived at our inbound tunnel
     * endpoint: consume it locally, reinject it through a tunnel we happen to be
     * the gateway for, or send it back out one of our outbound tunnels — never
     * directly to a remote router, as that would compromise our anonymity.
     *
     * @param msg fully reassembled message received down the tunnel
     * @param target router to deliver to, or null for the local router
     * @param tunnel tunnel on the target router to deliver to, or null for direct
     */
    public void distribute(I2NPMessage msg, Hash target, TunnelId tunnel) {
        // allow messages on client tunnels even after client disconnection, as it may
        // include e.g. test messages, etc. DataMessages will be dropped anyway
        /*
        if ( (_client != null) && (!_context.clientManager().isLocal(_client)) ) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Not distributing a message, as it came down a client's tunnel ("
                          + _client.toBase64() + ") after the client disconnected: " + msg);
            return;
        }
        */

        if ( (target == null) || ( (tunnel == null) && (_context.routerHash().equals(target) ) ) ) {
            // targetting us either implicitly (no target) or explicitly (no tunnel)
            // make sure we don't honor any remote requests directly (garlic instructions, etc)
            if (msg.getType() == GarlicMessage.MESSAGE_TYPE) {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("received garlic message in the tunnel, parse it out");
                // decrypt and handle each clove via handleClove() below
                _receiver.receive((GarlicMessage)msg);
            } else {
                if (_log.shouldLog(Log.INFO))
                    _log.info("distributing inbound tunnel message into our inNetMessagePool: " + msg);
                _context.inNetMessagePool().add(msg, null, null);
            }
        } else if (_context.routerHash().equals(target)) {
            // they want to send it to a tunnel, except we are also that tunnel's gateway —
            // dispatch it directly rather than sending it over the network to ourselves
            if (_log.shouldLog(Log.INFO))
                _log.info("distributing inbound tunnel message back out, except we are the gateway");
            TunnelGatewayMessage gw = new TunnelGatewayMessage(_context);
            gw.setMessage(msg);
            gw.setTunnelId(tunnel);
            // short expiration: gateway dispatch should be effectively immediate
            gw.setMessageExpiration(_context.clock().now()+10*1000);
            gw.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
            _context.tunnelDispatcher().dispatch(gw);
        } else {
            // ok, they want us to send it remotely, but that'd bust our anonymity,
            // so we send it out a tunnel first
            TunnelInfo out = _context.tunnelManager().selectOutboundTunnel(_client);
            if (_log.shouldLog(Log.INFO))
                _log.info("distributing inbound tunnel message back out " + out
                          + " targetting " + target.toBase64().substring(0,4));
            TunnelId outId = out.getSendTunnelId(0);
            if (outId == null) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("wtf, outbound tunnel has no outboundId? " + out
                               + " failing to distribute " + msg);
                return;
            }
            _context.tunnelDispatcher().dispatchOutbound(msg, outId, tunnel, target);
        }
    }

    /**
     * Handle a clove removed from the garlic message
     *
     * @param instructions where the clove says it should go
     * @param data the clove's payload message
     */
    public void handleClove(DeliveryInstructions instructions, I2NPMessage data) {
        switch (instructions.getDeliveryMode()) {
            case DeliveryInstructions.DELIVERY_MODE_LOCAL:
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("local delivery instructions for clove: " + data.getClass().getName());
                if (data instanceof GarlicMessage) {
                    // nested garlic — recurse through the receiver
                    _receiver.receive((GarlicMessage)data);
                    return;
                } else {
                    _context.inNetMessagePool().add(data, null, null);
                    return;
                }
            case DeliveryInstructions.DELIVERY_MODE_DESTINATION:
                // only DataMessages may be pushed up to a client destination, and only
                // to the client this tunnel belongs to
                if (!(data instanceof DataMessage)) {
                    if (_log.shouldLog(Log.ERROR))
                        _log.error("cant send a " + data.getClass().getName() + " to a destination");
                } else if ( (_client != null) && (_client.equals(instructions.getDestination())) ) {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("data message came down a tunnel for "
                                   + _client.toBase64().substring(0,4));
                    DataMessage dm = (DataMessage)data;
                    Payload payload = new Payload();
                    payload.setEncryptedData(dm.getData());
                    ClientMessage m = new ClientMessage();
                    m.setDestinationHash(_client);
                    m.setPayload(payload);
                    _context.clientManager().messageReceived(m);
                } else {
                    if (_log.shouldLog(Log.ERROR))
                        _log.error("this data message came down a tunnel for "
                                   + (_client == null ? "no one" : _client.toBase64().substring(0,4))
                                   + " but targetted "
                                   + instructions.getDestination().toBase64().substring(0,4));
                }
                return;
            case DeliveryInstructions.DELIVERY_MODE_ROUTER: // fall through
            case DeliveryInstructions.DELIVERY_MODE_TUNNEL:
                if (_log.shouldLog(Log.INFO))
                    _log.info("clove targetted " + instructions.getRouter() + ":" + instructions.getTunnelId()
                              + ", treat recursively to prevent leakage");
                // re-enter distribute() so the anonymity-preserving rules above apply
                distribute(data, instructions.getRouter(), instructions.getTunnelId());
                return;
            default:
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Unknown instruction " + instructions.getDeliveryMode() + ": " + instructions);
                return;
        }
    }
}

View File

@ -0,0 +1,98 @@
package net.i2p.router.tunnel;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
/**
* When a message arrives at the outbound tunnel endpoint, this distributor
* honors the instructions.
*/
public class OutboundMessageDistributor {
private RouterContext _context;
private Log _log;
private static final int MAX_DISTRIBUTE_TIME = 10*1000;
public OutboundMessageDistributor(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(OutboundMessageDistributor.class);
}
public void distribute(I2NPMessage msg, Hash target) {
distribute(msg, target, null);
}
public void distribute(I2NPMessage msg, Hash target, TunnelId tunnel) {
RouterInfo info = _context.netDb().lookupRouterInfoLocally(target);
if (info == null) {
_log.debug("outbound distributor to " + target.toBase64().substring(0,4)
+ "." + (tunnel != null ? tunnel.getTunnelId() + "" : "")
+ ": no info locally, searching...");
_context.netDb().lookupRouterInfo(target, new DistributeJob(_context, msg, target, tunnel), null, MAX_DISTRIBUTE_TIME);
return;
} else {
distribute(msg, info, tunnel);
}
}
public void distribute(I2NPMessage msg, RouterInfo target, TunnelId tunnel) {
I2NPMessage m = msg;
if (tunnel != null) {
TunnelGatewayMessage t = new TunnelGatewayMessage(_context);
t.setMessage(msg);
t.setTunnelId(tunnel);
t.setMessageExpiration(m.getMessageExpiration());
m = t;
}
if (_context.routerHash().equals(target.getIdentity().calculateHash())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("queueing inbound message to ourselves: " + m);
_context.inNetMessagePool().add(m, null, null);
return;
} else {
OutNetMessage out = new OutNetMessage(_context);
out.setExpiration(_context.clock().now() + MAX_DISTRIBUTE_TIME);
out.setTarget(target);
out.setMessage(m);
out.setPriority(400);
if (_log.shouldLog(Log.DEBUG))
_log.debug("queueing outbound message to " + target.getIdentity().calculateHash().toBase64().substring(0,4));
_context.outNetMessagePool().add(out);
}
}
private class DistributeJob extends JobImpl {
private I2NPMessage _message;
private Hash _target;
private TunnelId _tunnel;
public DistributeJob(RouterContext ctx, I2NPMessage msg, Hash target, TunnelId id) {
super(ctx);
_message = msg;
_target = target;
_tunnel = id;
}
public String getName() { return "distribute outbound message"; }
public void runJob() {
RouterInfo info = getContext().netDb().lookupRouterInfoLocally(_target);
if (info != null) {
_log.debug("outbound distributor to " + _target.toBase64().substring(0,4)
+ "." + (_tunnel != null ? _tunnel.getTunnelId() + "" : "")
+ ": found on search");
distribute(_message, info, _tunnel);
} else {
_log.error("outbound distributor to " + _target.toBase64().substring(0,4)
+ "." + (_tunnel != null ? _tunnel.getTunnelId() + "" : "")
+ ": NOT found on search");
}
}
}
}

View File

@ -0,0 +1,84 @@
package net.i2p.router.tunnel;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.TunnelDataMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
/**
* Receive the outbound message after it has been preprocessed and encrypted,
* then forward it on to the first hop in the tunnel.
*
*/
class OutboundReceiver implements TunnelGateway.Receiver {
    private RouterContext _context;
    private Log _log;
    // config of the outbound tunnel we created; peer 0 is us (the gateway),
    // peer 1 is the first remote hop
    private TunnelCreatorConfig _config;

    public OutboundReceiver(RouterContext ctx, TunnelCreatorConfig cfg) {
        _context = ctx;
        _log = ctx.logManager().getLog(OutboundReceiver.class);
        _config = cfg;
    }

    /**
     * Wrap the preprocessed+encrypted tunnel data in a TunnelDataMessage and
     * forward it to the first hop.  If that hop's RouterInfo is not cached
     * locally, search the netDb (10s max) and send on success, dropping the
     * message if the search fails.
     *
     * @param encrypted fully layered tunnel data, ready for the first hop
     */
    public void receiveEncrypted(byte encrypted[]) {
        TunnelDataMessage msg = new TunnelDataMessage(_context);
        msg.setData(encrypted);
        // tunnel id the first hop expects on inbound data from us
        msg.setTunnelId(_config.getConfig(0).getSendTunnel());

        if (_log.shouldLog(Log.DEBUG))
            _log.debug("received encrypted, sending out " + _config + ": " + msg);
        RouterInfo ri = _context.netDb().lookupRouterInfoLocally(_config.getPeer(1));
        if (ri != null) {
            send(msg, ri);
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("lookup of " + _config.getPeer(1).toBase64().substring(0,4)
                           + " required for " + msg);
            _context.netDb().lookupRouterInfo(_config.getPeer(1), new SendJob(msg), new FailedJob(), 10*1000);
        }
    }

    /** Queue the tunnel data message for delivery to the first hop. */
    private void send(TunnelDataMessage msg, RouterInfo ri) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("forwarding encrypted data out " + _config + ": " + msg);
        OutNetMessage m = new OutNetMessage(_context);
        m.setMessage(msg);
        m.setExpiration(msg.getMessageExpiration());
        m.setTarget(ri);
        m.setPriority(400);
        _context.outNetMessagePool().add(m);
        // track per-tunnel usage for stats/expiration bookkeeping
        _config.incrementProcessedMessages();
    }

    /** Fired when the netDb search for the first hop succeeds: send the held message. */
    private class SendJob extends JobImpl {
        private TunnelDataMessage _msg;

        public SendJob(TunnelDataMessage msg) {
            super(_context);
            _msg = msg;
        }

        public String getName() { return "forward a tunnel message"; }

        public void runJob() {
            RouterInfo ri = _context.netDb().lookupRouterInfoLocally(_config.getPeer(1));
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("lookup of " + _config.getPeer(1).toBase64().substring(0,4)
                           + " successful? " + (ri != null));
            // if still not found, the message is silently dropped
            if (ri != null)
                send(_msg, ri);
        }
    }

    /** Fired when the netDb search for the first hop fails: log and drop. */
    private class FailedJob extends JobImpl {
        public FailedJob() {
            super(_context);
        }

        public String getName() { return "failed looking for our outbound gateway"; }

        public void runJob() {
            if (_log.shouldLog(Log.ERROR))
                _log.error("lookup of " + _config.getPeer(1).toBase64().substring(0,4)
                           + " failed for " + _config);
        }
    }
}

View File

@ -0,0 +1,51 @@
package net.i2p.router.tunnel;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelDataMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
/**
* We are the end of an outbound tunnel that we did not create. Gather fragments
* and honor the instructions as received.
*
*/
/**
 * We are the end of an outbound tunnel that we did not create.  Decrypt/verify
 * the final layer, gather the fragments, and honor the instructions as received.
 */
public class OutboundTunnelEndpoint {
    private RouterContext _context;
    private Log _log;
    private HopConfig _config;
    // final-layer hop processor (includes IV validation)
    private HopProcessor _processor;
    // reassembles fragments into full I2NP messages
    private FragmentHandler _handler;
    // honors the per-message delivery instructions once reassembled
    private OutboundMessageDistributor _outDistributor;

    public OutboundTunnelEndpoint(RouterContext ctx, HopConfig config, HopProcessor processor) {
        _context = ctx;
        _log = ctx.logManager().getLog(OutboundTunnelEndpoint.class);
        _config = config;
        _processor = processor;
        _handler = new RouterFragmentHandler(ctx, new DefragmentedHandler());
        _outDistributor = new OutboundMessageDistributor(ctx);
    }

    /**
     * Process and defragment a tunnel data message arriving at this endpoint.
     * The message is dropped if hop processing (layer decryption / IV
     * validation) rejects it — previously the result was ignored, letting
     * invalid or replayed messages reach the fragment handler.
     *
     * @param msg tunnel data message received from the previous hop
     * @param recvFrom hash of the router that sent it to us
     */
    public void dispatch(TunnelDataMessage msg, Hash recvFrom) {
        _config.incrementProcessedMessages();
        boolean ok = _processor.process(msg.getData(), 0, msg.getData().length, recvFrom);
        if (!ok) {
            // failed validation (e.g. duplicate IV / tampering) — do not defragment
            if (_log.shouldLog(Log.WARN))
                _log.warn("invalid data message at the outbound endpoint of " + _config
                          + " from " + recvFrom.toBase64().substring(0,4));
            return;
        }
        _handler.receiveTunnelMessage(msg.getData(), 0, msg.getData().length);
    }

    /** Receives fully reassembled messages and hands them to the distributor. */
    private class DefragmentedHandler implements FragmentHandler.DefragmentedReceiver {
        public void receiveComplete(I2NPMessage msg, Hash toRouter, TunnelId toTunnel) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("outbound tunnel " + _config + " received a full message: " + msg
                           + " to be forwarded on to "
                           + (toRouter != null ? toRouter.toBase64().substring(0,4) : "")
                           + (toTunnel != null ? toTunnel.getTunnelId() + "" : ""));
            _outDistributor.distribute(msg, toRouter, toTunnel);
        }
    }
}

View File

@ -0,0 +1,25 @@
package net.i2p.router.tunnel;
import net.i2p.router.RouterContext;
/**
* Minor extension to allow message history integration
*/
public class RouterFragmentHandler extends FragmentHandler {
    // full router context, needed for messageHistory() access
    private RouterContext _routerContext;

    public RouterFragmentHandler(RouterContext context, DefragmentedReceiver receiver) {
        super(context, receiver);
        _routerContext = context;
    }

    /** Record in the message history that we received one fragment of a message. */
    protected void noteReception(long messageId, int fragmentId) {
        _routerContext.messageHistory().receiveTunnelFragment(messageId, fragmentId);
    }
    /** Record that all fragments of the message arrived and it was reassembled. */
    protected void noteCompletion(long messageId) {
        _routerContext.messageHistory().receiveTunnelFragmentComplete(messageId);
    }
    /** Record that the fragmented message expired before all fragments arrived. */
    protected void noteFailure(long messageId) {
        _routerContext.messageHistory().droppedFragmentedMessage(messageId);
    }
}

View File

@ -0,0 +1,20 @@
package net.i2p.router.tunnel;
import net.i2p.router.RouterContext;
/**
* Minor extension to track fragmentation
*
*/
public class TrivialRouterPreprocessor extends TrivialPreprocessor {
    // full router context, needed for messageHistory() access
    private RouterContext _routerContext;

    public TrivialRouterPreprocessor(RouterContext ctx) {
        super(ctx);
        _routerContext = ctx;
    }

    /** Record in the message history that we fragmented the message for sending. */
    protected void notePreprocessing(long messageId, int numFragments) {
        _routerContext.messageHistory().fragmentMessage(messageId, numFragments);
    }
}

View File

@ -0,0 +1,488 @@
package net.i2p.router.tunnel;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import net.i2p.router.RouterContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelDataMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.Service;
import net.i2p.util.Log;
/**
* Handle the actual processing and forwarding of messages through the
* various tunnels.
*
*/
public class TunnelDispatcher implements Service {
    private RouterContext _context;
    private Log _log;
    /** TunnelId to TunnelGateway: outbound tunnels we created (we are the gateway) */
    private Map _outboundGateways;
    /** TunnelId to OutboundTunnelEndpoint: tunnels we terminate but did not create */
    private Map _outboundEndpoints;
    /** TunnelId to TunnelParticipant: tunnels we participate in (incl. our inbound endpoints) */
    private Map _participants;
    /** TunnelId to TunnelGateway: inbound tunnels we are the gateway for */
    private Map _inboundGateways;
    /** id to HopConfig */
    private Map _participatingConfig;
    /** what is the date/time on which the last non-locally-created tunnel expires? */
    private long _lastParticipatingExpiration;
    // shared duplicate-IV detector for all hops; created on startup(), null when down
    private BloomFilterIVValidator _validator;

    /** Creates a new instance of TunnelDispatcher */
    public TunnelDispatcher(RouterContext ctx) {
        _context = ctx;
        _log = ctx.logManager().getLog(TunnelDispatcher.class);
        _outboundGateways = new HashMap();
        _outboundEndpoints = new HashMap();
        _participants = new HashMap();
        _inboundGateways = new HashMap();
        _participatingConfig = new HashMap();
        _lastParticipatingExpiration = 0;
        _validator = null;
        ctx.statManager().createRateStat("tunnel.participatingTunnels",
                                         "How many tunnels are we participating in?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.dispatchOutboundPeer",
                                         "How many messages we send out a tunnel targetting a peer?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.dispatchOutboundTunnel",
                                         "How many messages we send out a tunnel targetting a tunnel?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.dispatchInbound",
                                         "How many messages we send through our tunnel gateway?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.dispatchParticipant",
                                         "How many messages we send through a tunnel we are participating in?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.dispatchEndpoint",
                                         "How many messages we receive as the outbound endpoint of a tunnel?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.joinOutboundGateway",
                                         "How many tunnels we join as the outbound gateway?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.joinOutboundGatewayZeroHop",
                                         "How many zero hop tunnels we join as the outbound gateway?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.joinInboundEndpoint",
                                         "How many tunnels we join as the inbound endpoint?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.joinInboundEndpointZeroHop",
                                         "How many zero hop tunnels we join as the inbound endpoint?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.joinParticipant",
                                         "How many tunnels we join as a participant?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.joinOutboundEndpoint",
                                         "How many tunnels we join as the outbound endpoint?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.joinInboundGateway",
                                         "How many tunnels we join as the inbound gateway?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.dispatchGatewayTime",
                                         "How long it takes to dispatch a TunnelGatewayMessage", "Tunnels",
                                         new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.dispatchDataTime",
                                         "How long it takes to dispatch a TunnelDataMessage", "Tunnels",
                                         new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.dispatchOutboundTime",
                                         "How long it takes to dispatch an outbound message", "Tunnels",
                                         new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.dispatchOutboundZeroHopTime",
                                         "How long it takes to dispatch an outbound message through a zero hop tunnel", "Tunnels",
                                         new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.participatingMessageCount",
                                         "How many messages are sent through a participating tunnel?", "Tunnels",
                                         new long[] { 60*10*1000l, 60*60*1000l, 24*60*60*1000l });
    }

    /**
     * We are the outbound gateway - we created this tunnel
     * (zero hop tunnels get a pass-through gateway, real ones get the full
     * preprocess/send/receive pipeline)
     */
    public void joinOutbound(TunnelCreatorConfig cfg) {
        if (_log.shouldLog(Log.INFO))
            _log.info("Outbound built successfully: " + cfg);
        if (cfg.getLength() > 1) {
            TunnelGateway.QueuePreprocessor preproc = new TrivialRouterPreprocessor(_context);
            TunnelGateway.Sender sender = new OutboundSender(_context, cfg);
            TunnelGateway.Receiver receiver = new OutboundReceiver(_context, cfg);
            TunnelGateway gw = new TunnelGateway(_context, preproc, sender, receiver);
            TunnelId outId = cfg.getConfig(0).getSendTunnel();
            synchronized (_outboundGateways) {
                _outboundGateways.put(outId, gw);
            }
            _context.statManager().addRateData("tunnel.joinOutboundGateway", 1, 0);
        } else {
            TunnelGatewayZeroHop gw = new TunnelGatewayZeroHop(_context, cfg);
            TunnelId outId = cfg.getConfig(0).getSendTunnel();
            synchronized (_outboundGateways) {
                _outboundGateways.put(outId, gw);
            }
            _context.statManager().addRateData("tunnel.joinOutboundGatewayZeroHop", 1, 0);
        }
    }

    /**
     * We are the inbound endpoint - we created this tunnel
     * (registered under _participants keyed by the last hop's receive tunnel id)
     */
    public void joinInbound(TunnelCreatorConfig cfg) {
        if (_log.shouldLog(Log.INFO))
            _log.info("Inbound built successfully: " + cfg);

        if (cfg.getLength() > 1) {
            TunnelParticipant participant = new TunnelParticipant(_context, new InboundEndpointProcessor(_context, cfg, _validator));
            TunnelId recvId = cfg.getConfig(cfg.getLength()-1).getReceiveTunnel();
            synchronized (_participants) {
                _participants.put(recvId, participant);
            }
            _context.statManager().addRateData("tunnel.joinInboundEndpoint", 1, 0);
        } else {
            TunnelGatewayZeroHop gw = new TunnelGatewayZeroHop(_context, cfg);
            TunnelId recvId = cfg.getConfig(0).getReceiveTunnel();
            synchronized (_inboundGateways) {
                _inboundGateways.put(recvId, gw);
            }
            _context.statManager().addRateData("tunnel.joinInboundEndpointZeroHop", 1, 0);
        }
    }

    /**
     * We are a participant in this tunnel, but not as the endpoint or gateway
     *
     */
    public void joinParticipant(HopConfig cfg) {
        if (_log.shouldLog(Log.INFO))
            _log.info("Joining as participant: " + cfg);
        TunnelId recvId = cfg.getReceiveTunnel();
        TunnelParticipant participant = new TunnelParticipant(_context, cfg, new HopProcessor(_context, cfg, _validator));
        synchronized (_participants) {
            _participants.put(recvId, participant);
        }
        int numParticipants = 0;
        synchronized (_participatingConfig) {
            _participatingConfig.put(recvId, cfg);
            numParticipants = _participatingConfig.size();
        }
        _context.statManager().addRateData("tunnel.participatingTunnels", numParticipants, 0);
        _context.statManager().addRateData("tunnel.joinParticipant", 1, 0);
        if (cfg.getExpiration() > _lastParticipatingExpiration)
            _lastParticipatingExpiration = cfg.getExpiration();
        // schedule our own removal once the tunnel expires
        _context.jobQueue().addJob(new LeaveTunnel(_context, cfg));
    }

    /**
     * We are the outbound endpoint in this tunnel, and did not create it
     *
     */
    public void joinOutboundEndpoint(HopConfig cfg) {
        if (_log.shouldLog(Log.INFO))
            _log.info("Joining as outbound endpoint: " + cfg);
        TunnelId recvId = cfg.getReceiveTunnel();
        OutboundTunnelEndpoint endpoint = new OutboundTunnelEndpoint(_context, cfg, new HopProcessor(_context, cfg, _validator));
        synchronized (_outboundEndpoints) {
            _outboundEndpoints.put(recvId, endpoint);
        }
        int numParticipants = 0;
        synchronized (_participatingConfig) {
            _participatingConfig.put(recvId, cfg);
            numParticipants = _participatingConfig.size();
        }
        _context.statManager().addRateData("tunnel.participatingTunnels", numParticipants, 0);
        _context.statManager().addRateData("tunnel.joinOutboundEndpoint", 1, 0);

        if (cfg.getExpiration() > _lastParticipatingExpiration)
            _lastParticipatingExpiration = cfg.getExpiration();
        _context.jobQueue().addJob(new LeaveTunnel(_context, cfg));
    }

    /**
     * We are the inbound gateway in this tunnel, and did not create it
     *
     */
    public void joinInboundGateway(HopConfig cfg) {
        if (_log.shouldLog(Log.INFO))
            _log.info("Joining as inbound gateway: " + cfg);
        TunnelGateway.QueuePreprocessor preproc = new TrivialRouterPreprocessor(_context);
        TunnelGateway.Sender sender = new InboundSender(_context, cfg);
        TunnelGateway.Receiver receiver = new InboundGatewayReceiver(_context, cfg);
        TunnelGateway gw = new TunnelGateway(_context, preproc, sender, receiver);
        TunnelId recvId = cfg.getReceiveTunnel();
        synchronized (_inboundGateways) {
            _inboundGateways.put(recvId, gw);
        }
        int numParticipants = 0;
        synchronized (_participatingConfig) {
            _participatingConfig.put(recvId, cfg);
            numParticipants = _participatingConfig.size();
        }
        _context.statManager().addRateData("tunnel.participatingTunnels", numParticipants, 0);
        _context.statManager().addRateData("tunnel.joinInboundGateway", 1, 0);

        if (cfg.getExpiration() > _lastParticipatingExpiration)
            _lastParticipatingExpiration = cfg.getExpiration();
        _context.jobQueue().addJob(new LeaveTunnel(_context, cfg));
    }

    /** number of tunnels we are currently participating in (not our own) */
    public int getParticipatingCount() {
        synchronized (_participatingConfig) {
            return _participatingConfig.size();
        }
    }

    /** what is the date/time on which the last non-locally-created tunnel expires? */
    public long getLastParticipatingExpiration() { return _lastParticipatingExpiration; }

    /**
     * We no longer want to participate in this tunnel that we created
     */
    public void remove(TunnelCreatorConfig cfg) {
        if (cfg.isInbound()) {
            TunnelId recvId = cfg.getConfig(cfg.getLength()-1).getReceiveTunnel();
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("removing our own inbound " + cfg);
            boolean removed = false;
            synchronized (_participants) {
                removed = (null != _participants.remove(recvId));
            }
            if (!removed) {
                // zero hop inbound tunnels are registered as gateways instead
                synchronized (_inboundGateways) {
                    _inboundGateways.remove(recvId);
                }
            }
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("removing our own outbound " + cfg);
            TunnelId outId = cfg.getConfig(0).getSendTunnel();
            synchronized (_outboundGateways) {
                _outboundGateways.remove(outId);
            }
        }
    }

    /**
     * No longer participate in the tunnel that someone asked us to be a member of
     * (tries each of the role maps in turn, since we don't track which role
     * this hop config was registered under)
     */
    public void remove(HopConfig cfg) {
        TunnelId recvId = cfg.getReceiveTunnel();
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("removing " + cfg);

        boolean removed = false;
        synchronized (_participatingConfig) {
            removed = (null != _participatingConfig.remove(recvId));
        }
        if (!removed) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Participating tunnel, but no longer listed in participatingConfig? " + cfg);
        }

        synchronized (_participants) {
            removed = (null != _participants.remove(recvId));
        }
        if (removed) return;
        synchronized (_inboundGateways) {
            removed = (null != _inboundGateways.remove(recvId));
        }
        if (removed) return;
        synchronized (_outboundEndpoints) {
            removed = (null != _outboundEndpoints.remove(recvId));
        }
        // NOTE(review): only reached for outbound-endpoint hops (earlier roles
        // return above), so the message count stat is recorded just for those
        _context.statManager().addRateData("tunnel.participatingMessageCount", cfg.getProcessedMessagesCount(), 10*60*1000);
    }

    /**
     * We are participating in a tunnel (perhaps we're even the endpoint), so
     * take the message and do what it says.  If there are later hops, that
     * means encrypt a layer and forward it on.  If there aren't later hops,
     * how we handle it depends upon whether we created it or not.  If we didn't,
     * simply honor the instructions.  If we did, unwrap all the layers of
     * encryption and honor those instructions (within reason).
     *
     */
    public void dispatch(TunnelDataMessage msg, Hash recvFrom) {
        long before = _context.clock().now();
        TunnelParticipant participant = null;
        synchronized (_participants) {
            participant = (TunnelParticipant)_participants.get(msg.getTunnelId());
        }
        if (participant != null) {
            // we are either just a random participant or the inbound endpoint
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("dispatch to participant " + participant + ": " + msg.getUniqueId() + " from "
                           + recvFrom.toBase64().substring(0,4));
            participant.dispatch(msg, recvFrom);
            _context.statManager().addRateData("tunnel.dispatchParticipant", 1, 0);
        } else {
            OutboundTunnelEndpoint endpoint = null;
            synchronized (_outboundEndpoints) {
                endpoint = (OutboundTunnelEndpoint)_outboundEndpoints.get(msg.getTunnelId());
            }
            if (endpoint != null) {
                // we are the outbound endpoint
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("dispatch where we are the outbound endpoint: " + endpoint + ": "
                               + msg + " from " + recvFrom.toBase64().substring(0,4));
                endpoint.dispatch(msg, recvFrom);
                _context.statManager().addRateData("tunnel.dispatchEndpoint", 1, 0);
            } else {
                _context.messageHistory().droppedTunnelDataMessageUnknown(msg.getUniqueId(), msg.getTunnelId().getTunnelId());
                // NOTE(review): keySet() is read here without holding the map locks —
                // only for logging, but not strictly thread safe
                if (_log.shouldLog(Log.ERROR))
                    _log.error("no matching participant/endpoint for id=" + msg.getTunnelId().getTunnelId()
                               + ": existing = " + _participants.keySet()
                               + " / " + _outboundEndpoints.keySet());
            }
        }

        long dispatchTime = _context.clock().now() - before;
        _context.statManager().addRateData("tunnel.dispatchDataTime", dispatchTime, dispatchTime);
    }

    /**
     * We are the inbound tunnel gateway, so encrypt it as necessary and forward
     * it on.
     *
     */
    public void dispatch(TunnelGatewayMessage msg) {
        long before = _context.clock().now();
        TunnelGateway gw = null;
        synchronized (_inboundGateways) {
            gw = (TunnelGateway)_inboundGateways.get(msg.getTunnelId());
        }
        if (gw != null) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("dispatch where we are the inbound gateway: " + gw + ": " + msg);
            // reject if either the wrapper or the wrapped message has already expired
            // (beyond the allowed clock skew)
            if ( (msg.getMessageExpiration() < before - Router.CLOCK_FUDGE_FACTOR) || (msg.getMessage().getMessageExpiration() < before - Router.CLOCK_FUDGE_FACTOR) ) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Not dispatching a gateway message for tunnel " + msg.getTunnelId().getTunnelId()
                               + " as the wrapper's expiration is in " + DataHelper.formatDuration(msg.getMessageExpiration()-before)
                               + " and/or the content's expiration is in " + DataHelper.formatDuration(msg.getMessage().getMessageExpiration()-before)
                               + " with messageId " + msg.getUniqueId() + "/" + msg.getMessage().getUniqueId() + " and message type "
                               + msg.getMessage().getClass().getName());
                return;
            }
            gw.add(msg);
            _context.statManager().addRateData("tunnel.dispatchInbound", 1, 0);
        } else {
            _context.messageHistory().droppedTunnelGatewayMessageUnknown(msg.getUniqueId(), msg.getTunnelId().getTunnelId());
            if (_log.shouldLog(Log.ERROR))
                _log.error("no matching tunnel for id=" + msg.getTunnelId().getTunnelId()
                           + ": gateway message expiring in "
                           + DataHelper.formatDuration(msg.getMessageExpiration()-_context.clock().now())
                           + "/"
                           + DataHelper.formatDuration(msg.getMessage().getMessageExpiration()-_context.clock().now())
                           + " messageId " + msg.getUniqueId()
                           + "/" + msg.getMessage().getUniqueId()
                           + " messageType: " + msg.getMessage().getClass().getName()
                           + " existing = " + _inboundGateways.keySet());
        }

        long dispatchTime = _context.clock().now() - before;
        _context.statManager().addRateData("tunnel.dispatchGatewayTime", dispatchTime, dispatchTime);
    }

    /**
     * We are the outbound tunnel gateway (we created it), so wrap up this message
     * with instructions to be forwarded to the targetPeer when it reaches the
     * endpoint.
     *
     * @param msg raw message to deliver to the target peer
     * @param outboundTunnel tunnel to send the message out
     * @param targetPeer peer to receive the message
     */
    public void dispatchOutbound(I2NPMessage msg, TunnelId outboundTunnel, Hash targetPeer) {
        dispatchOutbound(msg, outboundTunnel, null, targetPeer);
    }

    /**
     * We are the outbound tunnel gateway (we created it), so wrap up this message
     * with instructions to be forwarded to the targetTunnel on the targetPeer when
     * it reaches the endpoint.
     *
     * @param msg raw message to deliver to the targetTunnel on the targetPeer
     * @param outboundTunnel tunnel to send the message out
     * @param targetTunnel tunnel on the targetPeer to deliver the message to
     * @param targetPeer gateway to the tunnel to receive the message
     */
    public void dispatchOutbound(I2NPMessage msg, TunnelId outboundTunnel, TunnelId targetTunnel, Hash targetPeer) {
        if (outboundTunnel == null) throw new IllegalArgumentException("wtf, null outbound tunnel?");
        long before = _context.clock().now();
        TunnelGateway gw = null;
        synchronized (_outboundGateways) {
            gw = (TunnelGateway)_outboundGateways.get(outboundTunnel);
        }
        if (gw != null) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("dispatch outbound through " + outboundTunnel.getTunnelId()
                           + ": " + msg);
            if (msg.getMessageExpiration() < before) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("why are you sending a tunnel message that expired "
                               + (before-msg.getMessageExpiration()) + "ms ago? "
                               + msg, new Exception("cause"));
                return;
            }
            gw.add(msg, targetPeer, targetTunnel);
            if (targetTunnel == null)
                _context.statManager().addRateData("tunnel.dispatchOutboundPeer", 1, 0);
            else
                _context.statManager().addRateData("tunnel.dispatchOutboundTunnel", 1, 0);
        } else {
            _context.messageHistory().droppedTunnelGatewayMessageUnknown(msg.getUniqueId(), outboundTunnel.getTunnelId());
            if (_log.shouldLog(Log.ERROR))
                _log.error("no matching outbound tunnel for id=" + outboundTunnel
                           + ": existing = " + _outboundGateways.keySet());
        }

        long dispatchTime = _context.clock().now() - before;
        if (gw instanceof TunnelGatewayZeroHop)
            _context.statManager().addRateData("tunnel.dispatchOutboundZeroHopTime", dispatchTime, dispatchTime);
        else
            _context.statManager().addRateData("tunnel.dispatchOutboundTime", dispatchTime, dispatchTime);
    }

    /** snapshot of the HopConfigs for all tunnels we are participating in */
    public List listParticipatingTunnels() {
        synchronized (_participatingConfig) {
            return new ArrayList(_participatingConfig.values());
        }
    }

    public void startup() {
        // NB: 256 == assume max rate (size adjusted to handle 256 messages per second)
        _validator = new BloomFilterIVValidator(_context, 256);
    }
    public void shutdown() {
        if (_validator != null)
            _validator.destroy();
        _validator = null;
    }
    public void restart() {
        shutdown();
        startup();
    }

    public void renderStatusHTML(Writer out) throws IOException {}

    /**
     * Scheduled when we join a tunnel we didn't create; removes the hop config
     * from all the role maps once the tunnel has expired (plus clock fudge).
     */
    private class LeaveTunnel extends JobImpl {
        private HopConfig _config;

        public LeaveTunnel(RouterContext ctx, HopConfig config) {
            super(ctx);
            _config = config;
            getTiming().setStartAfter(config.getExpiration() + 2*Router.CLOCK_FUDGE_FACTOR);
        }

        public String getName() { return "Leave participant"; }
        public void runJob() {
            remove(_config);
        }
    }
}

View File

@ -0,0 +1,70 @@
package net.i2p.router.tunnel;
import java.util.ArrayList;
import java.util.List;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer;
/**
 * Serve as the gatekeeper for a tunnel with no hops.
 *
 */
public class TunnelGatewayZeroHop extends TunnelGateway {
    private RouterContext _context;
    private Log _log;
    private TunnelCreatorConfig _config;
    // exactly one of the two distributors is non-null, per the direction
    private OutboundMessageDistributor _outDistributor;
    private InboundMessageDistributor _inDistributor;

    /**
     * Build the zero hop gateway, instantiating the inbound or outbound
     * distributor according to the config's direction.
     */
    public TunnelGatewayZeroHop(RouterContext context, TunnelCreatorConfig config) {
        super(context, null, null, null);
        _context = context;
        _log = context.logManager().getLog(TunnelGatewayZeroHop.class);
        _config = config;
        if (!config.isInbound())
            _outDistributor = new OutboundMessageDistributor(context);
        else
            _inDistributor = new InboundMessageDistributor(context, config.getDestination());
    }

    /**
     * Add a message to be sent down the tunnel, where we are the inbound gateway.
     *
     * @param msg message received to be sent through the tunnel
     */
    public void add(TunnelGatewayMessage msg) {
        I2NPMessage payload = msg.getMessage();
        add(payload, null, null);
    }

    /**
     * Add a message to be sent down the tunnel (immediately forwarding it to the
     * {@link InboundMessageDistributor} or {@link OutboundMessageDistributor}, as
     * necessary).
     *
     * @param msg message to be sent through the tunnel
     * @param toRouter router to send to after the endpoint (or null for endpoint processing)
     * @param toTunnel tunnel to send to after the endpoint (or null for endpoint or router processing)
     */
    public void add(I2NPMessage msg, Hash toRouter, TunnelId toTunnel) {
        if (_log.shouldLog(Log.DEBUG)) {
            String dir = _config.isInbound() ? "inbound " : " outbound ";
            String router = (toRouter != null ? toRouter.toBase64().substring(0,4) : "");
            String tunnel = (toTunnel != null ? toTunnel.getTunnelId() + "" : "");
            _log.debug("zero hop gateway: distribute " + dir + " to " + router
                       + "." + tunnel + ": " + msg);
        }
        // no hops, so hand the message straight to the appropriate distributor
        if (!_config.isInbound())
            _outDistributor.distribute(msg, toRouter, toTunnel);
        else
            _inDistributor.distribute(msg, toRouter, toTunnel);
        _config.incrementProcessedMessages();
    }
}

View File

@ -0,0 +1,157 @@
package net.i2p.router.tunnel;
import java.util.Date;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelDataMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
/**
 * Participate in a tunnel at a location other than the gateway or outbound
 * endpoint. This participant should be provided with the necessary processor
 * if it is an inbound tunnel endpoint, and that will enable the
 * InboundMessageDistributor to receive defragmented and decrypted messages,
 * which it will then selectively forward.
 */
public class TunnelParticipant {
    private RouterContext _context;
    private Log _log;
    // non-null for a mid-tunnel hop; null when we are the inbound endpoint
    private HopConfig _config;
    // layer crypto for a mid-tunnel hop; null when we are the inbound endpoint
    private HopProcessor _processor;
    // non-null only when we are the inbound tunnel endpoint
    private InboundEndpointProcessor _inboundEndpointProcessor;
    private InboundMessageDistributor _inboundDistributor;
    // reassembles fragments when there is no next hop to forward to
    private FragmentHandler _handler;

    /** Build a participant for a hop in the middle of a tunnel */
    public TunnelParticipant(RouterContext ctx, HopConfig config, HopProcessor processor) {
        this(ctx, config, processor, null);
    }
    /** Build a participant acting as the inbound tunnel endpoint */
    public TunnelParticipant(RouterContext ctx, InboundEndpointProcessor inEndProc) {
        this(ctx, null, null, inEndProc);
    }
    private TunnelParticipant(RouterContext ctx, HopConfig config, HopProcessor processor, InboundEndpointProcessor inEndProc) {
        _context = ctx;
        _log = ctx.logManager().getLog(TunnelParticipant.class);
        _config = config;
        _processor = processor;
        // no next hop configured means we must defragment locally
        if ( (config == null) || (config.getSendTo() == null) )
            _handler = new RouterFragmentHandler(ctx, new DefragmentedHandler());
        _inboundEndpointProcessor = inEndProc;
        if (inEndProc != null)
            _inboundDistributor = new InboundMessageDistributor(ctx, inEndProc.getDestination());
    }

    /**
     * Process (decrypt/verify) the tunnel data message, then either forward
     * it on to the next hop or, if we are the last hop, hand the data to the
     * fragment handler for reassembly and local distribution.
     *
     * @param msg tunnel data message received
     * @param recvFrom hash of the peer we received it from
     */
    public void dispatch(TunnelDataMessage msg, Hash recvFrom) {
        boolean ok = false;
        if (_processor != null)
            ok = _processor.process(msg.getData(), 0, msg.getData().length, recvFrom);
        else if (_inboundEndpointProcessor != null)
            ok = _inboundEndpointProcessor.retrievePreprocessedData(msg.getData(), 0, msg.getData().length, recvFrom);
        if (!ok) {
            // processing failed (e.g. invalid data); drop the message
            if (_log.shouldLog(Log.ERROR))
                _log.error("Failed to dispatch " + msg + ": processor=" + _processor
                           + " inboundEndpoint=" + _inboundEndpointProcessor);
            return;
        }
        if ( (_config != null) && (_config.getSendTo() != null) ) {
            // mid-tunnel hop: forward to the next peer, looking up their
            // RouterInfo asynchronously if we don't have it cached
            _config.incrementProcessedMessages();
            RouterInfo ri = _context.netDb().lookupRouterInfoLocally(_config.getSendTo());
            if (ri != null) {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Send off to nextHop directly (" + _config.getSendTo().toBase64().substring(0,4)
                               + " for " + msg);
                send(_config, msg, ri);
            } else {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Lookup the nextHop (" + _config.getSendTo().toBase64().substring(0,4)
                              + " for " + msg);
                _context.netDb().lookupRouterInfo(_config.getSendTo(), new SendJob(msg), new TimeoutJob(msg), 10*1000);
            }
        } else {
            // endpoint: feed the preprocessed data to the fragment handler
            // NOTE(review): if _config is non-null but has a null sendTo,
            // _inboundEndpointProcessor is null here and this line would NPE —
            // verify that construction path can never occur
            _inboundEndpointProcessor.getConfig().incrementProcessedMessages();
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Receive fragment: on " + _config + ": " + msg);
            _handler.receiveTunnelMessage(msg.getData(), 0, msg.getData().length);
        }
    }

    /** Callback fired by the fragment handler once a full message is reassembled */
    private class DefragmentedHandler implements FragmentHandler.DefragmentedReceiver {
        public void receiveComplete(I2NPMessage msg, Hash toRouter, TunnelId toTunnel) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Receive complete: on " + _config + ": " + msg);
            _inboundDistributor.distribute(msg, toRouter, toTunnel);
        }
    }

    /**
     * Re-id and re-expire the message, point it at the next hop's tunnel id,
     * and queue it on the outbound network message pool.
     */
    private void send(HopConfig config, TunnelDataMessage msg, RouterInfo ri) {
        msg.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
        msg.setMessageExpiration(_context.clock().now() + 10*1000);
        OutNetMessage m = new OutNetMessage(_context);
        msg.setTunnelId(config.getSendTunnel());
        m.setMessage(msg);
        m.setExpiration(msg.getMessageExpiration());
        m.setTarget(ri);
        m.setPriority(400);
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Forward on from " + _config + ": " + msg);
        _context.outNetMessagePool().add(m);
    }

    /** Fired when the async netDb lookup of the next hop succeeds */
    private class SendJob extends JobImpl {
        private TunnelDataMessage _msg;
        public SendJob(TunnelDataMessage msg) {
            super(_context);
            _msg = msg;
        }
        public String getName() { return "forward a tunnel message"; }
        public void runJob() {
            RouterInfo ri = _context.netDb().lookupRouterInfoLocally(_config.getSendTo());
            if (ri != null) {
                send(_config, _msg, ri);
            } else {
                // lookup "succeeded" but the info still isn't cached; drop
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Lookup the nextHop (" + _config.getSendTo().toBase64().substring(0,4)
                               + " failed! where do we go for " + _config + "? msg dropped: " + _msg);
            }
        }
    }

    /** Fired when the async netDb lookup of the next hop times out: drop the message */
    private class TimeoutJob extends JobImpl {
        private TunnelDataMessage _msg;
        public TimeoutJob(TunnelDataMessage msg) {
            super(_context);
            _msg = msg;
        }
        public String getName() { return "timeout looking for next hop info"; }
        public void runJob() {
            RouterInfo ri = _context.netDb().lookupRouterInfoLocally(_config.getSendTo());
            if (ri != null) {
                // race: the info arrived after the timeout fired
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Lookup the nextHop (" + _config.getSendTo().toBase64().substring(0,4)
                               + " failed, but we found it!! where do we go for " + _config + "? msg dropped: " + _msg);
            } else {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Lookup the nextHop (" + _config.getSendTo().toBase64().substring(0,4)
                               + " failed! where do we go for " + _config + "? msg dropped: " + _msg);
            }
        }
    }

    public String toString() {
        if (_config != null) {
            StringBuffer buf = new StringBuffer(64);
            buf.append("participant at ").append(_config.toString());
            return buf.toString();
        } else {
            return "inbound endpoint";
        }
    }
}

View File

@ -0,0 +1,32 @@
package net.i2p.router.tunnel.pool;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelPoolSettings;
/**
 * Pick peers randomly out of the fast pool, and put them into tunnels in a
 * random order
 *
 */
class ClientPeerSelector extends TunnelPeerSelector {
    /**
     * @return ordered list of peer hashes for the tunnel (ourselves at the
     *         gateway end for inbound, at the endpoint end for outbound),
     *         or null if the settings yield an invalid length
     */
    public List selectPeers(RouterContext ctx, TunnelPoolSettings settings) {
        int length = getLength(ctx, settings);
        if (length < 0)
            return null;
        // draw from the fast tier, and never include ourselves in the draw
        HashSet peers = new HashSet(length);
        ctx.profileOrganizer().selectFastPeers(length, null, peers);
        peers.remove(ctx.routerHash());
        ArrayList ordered = new ArrayList(peers);
        Collections.shuffle(ordered, ctx.random());
        // we sit at the receiving end of an inbound tunnel, or the
        // sending end of an outbound one
        if (settings.isInbound())
            ordered.add(0, ctx.routerHash());
        else
            ordered.add(ctx.routerHash());
        return ordered;
    }
}

View File

@ -0,0 +1,49 @@
package net.i2p.router.tunnel.pool;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.tunnel.TunnelCreatorConfig;
/**
 * Expire a tunnel in two passes: first remove it from its pool (updating
 * the leaseset) when it expires, then after the clock fudge factor stop
 * honoring it in the dispatcher entirely.
 */
class ExpireJob extends JobImpl {
    private TunnelPool _pool;
    private TunnelCreatorConfig _cfg;
    private Object _buildToken;
    // false until the first pass has pulled the tunnel from the pool
    private boolean _leaseUpdated;

    public ExpireJob(RouterContext ctx, TunnelCreatorConfig cfg, TunnelPool pool, Object buildToken) {
        super(ctx);
        _pool = pool;
        _cfg = cfg;
        _buildToken = buildToken;
        _leaseUpdated = false;
        // give 'em some extra time before dropping 'em
        getTiming().setStartAfter(cfg.getExpiration()); // + Router.CLOCK_FUDGE_FACTOR);
    }

    public String getName() {
        String scope = _pool.getSettings().isExploratory() ? "exploratory" : "client";
        String direction = _pool.getSettings().isInbound() ? "inbound" : "outbound";
        return "Expire " + scope + " " + direction + " tunnel";
    }

    public void runJob() {
        if (!_leaseUpdated) {
            // first pass: pull the tunnel from the pool and refresh the
            // leaseset, then come back after the fudge factor for the
            // benefit of any straggling messages
            _pool.removeTunnel(_cfg);
            _leaseUpdated = true;
            _pool.refreshLeaseSet();
            requeue(Router.CLOCK_FUDGE_FACTOR);
            return;
        }
        // second pass: stop honoring the tunnel altogether
        getContext().tunnelDispatcher().remove(_cfg);
    }
}

View File

@ -0,0 +1,32 @@
package net.i2p.router.tunnel.pool;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelPoolSettings;
/**
 * Pick peers randomly out of the not-failing pool, and put them into randomly
 * ordered tunnels.
 *
 */
class ExploratoryPeerSelector extends TunnelPeerSelector {
    /**
     * @return ordered list of peer hashes for the tunnel (ourselves at the
     *         gateway end for inbound, at the endpoint end for outbound),
     *         or null if the settings yield an invalid length
     */
    public List selectPeers(RouterContext ctx, TunnelPoolSettings settings) {
        int length = getLength(ctx, settings);
        if (length < 0)
            return null;
        // draw from the not-failing tier, and never include ourselves
        HashSet peers = new HashSet(length);
        ctx.profileOrganizer().selectNotFailingPeers(length, null, peers);
        peers.remove(ctx.routerHash());
        ArrayList ordered = new ArrayList(peers);
        Collections.shuffle(ordered, ctx.random());
        // we sit at the receiving end of an inbound tunnel, or the
        // sending end of an outbound one
        if (settings.isInbound())
            ordered.add(0, ctx.routerHash());
        else
            ordered.add(ctx.routerHash());
        return ordered;
    }
}

View File

@ -0,0 +1,164 @@
package net.i2p.router.tunnel.pool;
import net.i2p.data.Certificate;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelCreateMessage;
import net.i2p.data.i2np.TunnelCreateStatusMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.message.GarlicMessageBuilder;
import net.i2p.router.message.PayloadGarlicConfig;
import net.i2p.router.message.SendMessageDirectJob;
import net.i2p.router.tunnel.HopConfig;
import net.i2p.util.Log;
/**
 * Receive a request to join a tunnel, and if we aren't overloaded (per the
 * throttle), join it (updating the tunnelDispatcher), then send back the
 * agreement. Even if we are overloaded, send back a reply stating how
 * overloaded we are.
 *
 */
public class HandleTunnelCreateMessageJob extends JobImpl {
    private Log _log;
    private TunnelCreateMessage _request;

    /** job builder to redirect all tunnelCreateMessages through this job type */
    static class Builder implements HandlerJobBuilder {
        private RouterContext _ctx;
        public Builder(RouterContext ctx) { _ctx = ctx; }
        public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
            return new HandleTunnelCreateMessageJob(_ctx, (TunnelCreateMessage)receivedMessage);
        }
    }

    public HandleTunnelCreateMessageJob(RouterContext ctx, TunnelCreateMessage msg) {
        super(ctx);
        _log = ctx.logManager().getLog(HandleTunnelCreateMessageJob.class);
        _request = msg;
    }

    public String getName() { return "Handle tunnel join request"; }

    /**
     * Consult the throttle; on acceptance join the tunnel and reply with
     * success, otherwise reply with the rejection severity.
     */
    public void runJob() {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("handle join request: " + _request);
        int status = shouldAccept();
        if (status > 0) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("reject(" + status + ") join request: " + _request);
            sendRejection(status);
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("accept join request: " + _request);
            accept();
        }
    }

    /** @return 0 or less to accept; a positive rejection severity otherwise */
    private int shouldAccept() { return getContext().throttle().acceptTunnelRequest(_request); }

    /**
     * Join the tunnel in the role the request describes (inbound gateway,
     * outbound endpoint, or plain participant), then reply with the
     * randomly chosen tunnel id we will receive on.
     */
    private void accept() {
        // 4 byte tunnel id we will be receiving messages on
        byte recvId[] = new byte[4];
        getContext().random().nextBytes(recvId);

        HopConfig cfg = new HopConfig();
        long expiration = _request.getDurationSeconds()*1000 + getContext().clock().now();
        cfg.setExpiration(expiration);
        cfg.setIVKey(_request.getIVKey());
        cfg.setLayerKey(_request.getLayerKey());
        cfg.setOptions(_request.getOptions());
        cfg.setReceiveTunnelId(recvId);

        if (_request.getIsGateway()) {
            if (_log.shouldLog(Log.INFO))
                _log.info("join as inbound tunnel gateway pointing at "
                          + _request.getNextRouter().toBase64().substring(0,4) + ":"
                          + _request.getNextTunnelId().getTunnelId()
                          + " (nonce=" + _request.getNonce() + ")");
            // serve as the inbound tunnel gateway
            cfg.setSendTo(_request.getNextRouter());
            cfg.setSendTunnelId(DataHelper.toLong(4, _request.getNextTunnelId().getTunnelId()));
            getContext().tunnelDispatcher().joinInboundGateway(cfg);
        } else if (_request.getNextRouter() == null) {
            if (_log.shouldLog(Log.INFO))
                _log.info("join as outbound tunnel endpoint (nonce=" + _request.getNonce() + ")");
            // serve as the outbound tunnel endpoint
            getContext().tunnelDispatcher().joinOutboundEndpoint(cfg);
        } else {
            if (_log.shouldLog(Log.INFO))
                _log.info("join as tunnel participant pointing at "
                          + _request.getNextRouter().toBase64().substring(0,4) + ":"
                          + _request.getNextTunnelId().getTunnelId()
                          + " (nonce=" + _request.getNonce() + ")");
            // serve as a general participant
            cfg.setSendTo(_request.getNextRouter());
            cfg.setSendTunnelId(DataHelper.toLong(4, _request.getNextTunnelId().getTunnelId()));
            getContext().tunnelDispatcher().joinParticipant(cfg);
        }
        sendAcceptance(recvId);
    }

    // all-ones tunnel id signals rejection to the requestor
    private static final byte[] REJECTION_TUNNEL_ID = new byte[] { (byte)0xFF, (byte)0xFF, (byte)0xFF, (byte)0xFF };
    private static final int REPLY_TIMEOUT = 30*1000;
    private static final int REPLY_PRIORITY = 500;

    private void sendAcceptance(byte recvId[]) {
        sendReply(recvId, TunnelCreateStatusMessage.STATUS_SUCCESS);
    }
    private void sendRejection(int severity) {
        sendReply(REJECTION_TUNNEL_ID, severity);
    }

    /**
     * Build the garlic-wrapped status reply and send it through the reply
     * tunnel named in the request.
     *
     * @param recvId the 4 byte tunnel id we receive on (or the rejection marker)
     * @param status accept/reject status code for the reply
     */
    private void sendReply(byte recvId[], int status) {
        TunnelCreateStatusMessage reply = new TunnelCreateStatusMessage(getContext());
        reply.setNonce(_request.getNonce());
        reply.setReceiveTunnelId(new TunnelId(DataHelper.fromLong(recvId, 0, 4)));
        reply.setStatus(status);

        GarlicMessage msg = createReply(reply);
        if (msg == null)
            throw new RuntimeException("wtf, couldn't create reply? to " + _request);

        TunnelGatewayMessage gw = new TunnelGatewayMessage(getContext());
        gw.setMessage(msg);
        gw.setTunnelId(_request.getReplyTunnel());
        gw.setMessageExpiration(msg.getMessageExpiration());

        if (_log.shouldLog(Log.DEBUG))
            _log.debug("sending (" + status + ") to the tunnel "
                       + _request.getReplyGateway().toBase64().substring(0,4) + ":"
                       + _request.getReplyTunnel() + " wrt " + _request);
        getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), gw, _request.getReplyGateway(),
                                                                REPLY_TIMEOUT, REPLY_PRIORITY));
    }

    /**
     * Wrap the status message in a garlic clove encrypted to the session
     * key/tag the requestor supplied, for local delivery at their end.
     */
    private GarlicMessage createReply(TunnelCreateStatusMessage reply) {
        DeliveryInstructions instructions = new DeliveryInstructions();
        instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);

        PayloadGarlicConfig cfg = new PayloadGarlicConfig();
        cfg.setPayload(reply);
        cfg.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
        cfg.setDeliveryInstructions(instructions);
        cfg.setRequestAck(false);
        cfg.setExpiration(getContext().clock().now() + REPLY_TIMEOUT);
        cfg.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));

        GarlicMessage msg = GarlicMessageBuilder.buildMessage(getContext(), cfg,
                                                              null, // we dont care about the tags
                                                              null, // or keys sent
                                                              null, // and we don't know what public key to use
                                                              _request.getReplyKey(), _request.getReplyTag());
        return msg;
    }
}

View File

@ -0,0 +1,50 @@
package net.i2p.router.tunnel.pool;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.tunnel.TunnelCreatorConfig;
import net.i2p.util.Log;
/**
 * The tunnel is fully built, so now add it to our handler, to the pool, and
 * build the necessary test and rebuilding jobs.
 *
 */
class OnCreatedJob extends JobImpl {
    private Log _log;
    private TunnelPool _pool;
    private Object _buildToken;
    private PooledTunnelCreatorConfig _cfg;
    // true when this tunnel is a 0 hop stopgap built in response to a failure
    private boolean _fake;

    public OnCreatedJob(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, boolean fake, Object buildToken) {
        super(ctx);
        _log = ctx.logManager().getLog(OnCreatedJob.class);
        _pool = pool;
        _cfg = cfg;
        _fake = fake;
        _buildToken = buildToken;
    }

    public String getName() { return "Tunnel built"; }

    /**
     * Register the new tunnel with the dispatcher and its pool, then wire up
     * and queue the maintenance jobs (test, rebuild, expire).
     */
    public void runJob() {
        // guard the debug call like every other log site in the tunnel code,
        // so the log string isn't built when debug logging is disabled
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Created successfully: " + _cfg);
        if (_cfg.isInbound()) {
            getContext().tunnelDispatcher().joinInbound(_cfg);
        } else {
            getContext().tunnelDispatcher().joinOutbound(_cfg);
        }

        _pool.addTunnel(_cfg);
        TestJob testJob = (_cfg.getLength() > 1 ? new TestJob(getContext(), _cfg, _pool, _buildToken) : null);
        RebuildJob rebuildJob = (_fake ? null : new RebuildJob(getContext(), _cfg, _pool, _buildToken));
        ExpireJob expireJob = new ExpireJob(getContext(), _cfg, _pool, _buildToken);
        _cfg.setTunnelPool(_pool);
        _cfg.setTestJob(testJob);
        _cfg.setRebuildJob(rebuildJob);
        _cfg.setExpireJob(expireJob);
        if (_cfg.getLength() > 1) // no need to test 0 hop tunnels
            getContext().jobQueue().addJob(testJob);
        if (!_fake) // if we built a 0 hop tunnel in response to a failure, don't rebuild
            getContext().jobQueue().addJob(rebuildJob);
        getContext().jobQueue().addJob(expireJob);
    }
}

View File

@ -0,0 +1,61 @@
package net.i2p.router.tunnel.pool;
import net.i2p.data.Hash;
import net.i2p.router.RouterContext;
import net.i2p.router.tunnel.TunnelCreatorConfig;
/**
 * TunnelCreatorConfig that knows the pool it belongs to and the jobs that
 * maintain it (testing, rebuilding, and expiring).
 */
public class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
    private RouterContext _context;
    private TunnelPool _pool;
    private boolean _failed;
    private TestJob _testJob;
    private RebuildJob _rebuildJob;
    private ExpireJob _expireJob;

    /** Creates a new instance of PooledTunnelCreatorConfig */
    public PooledTunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound) {
        this(ctx, length, isInbound, null);
    }
    public PooledTunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound, Hash destination) {
        super(length, isInbound, destination);
        _context = ctx;
        _pool = null;
        _failed = false;
    }

    /** Notify the test job (if any) that a test made it through in the given time */
    public void testSuccessful(int ms) {
        TestJob job = _testJob;
        if (job != null)
            job.testSuccessful(ms);
    }

    /**
     * The tunnel failed, so stop using it
     */
    public void tunnelFailed() {
        _failed = true;
        // remove us from the pool (but not the dispatcher) so that we aren't
        // selected again. _expireJob is left to do its thing, in case there
        // are any straggling messages coming down the tunnel
        _pool.tunnelFailed(this);
        RebuildJob rebuild = _rebuildJob;
        if (rebuild != null) {
            // rebuild asap (rebuild is null if we were just a stopgap)
            long startAt = _context.clock().now() + 10*1000;
            rebuild.getTiming().setStartAfter(startAt);
            _context.jobQueue().addJob(rebuild);
        }
        TestJob test = _testJob;
        if (test != null) // just in case...
            _context.jobQueue().removeJob(test);
    }
    public boolean getTunnelFailed() { return _failed; }

    // wiring accessors, set by OnCreatedJob once the tunnel is built
    public void setTunnelPool(TunnelPool pool) { _pool = pool; }
    public TunnelPool getTunnelPool() { return _pool; }
    public void setTestJob(TestJob job) { _testJob = job; }
    public void setRebuildJob(RebuildJob job) { _rebuildJob = job; }
    public void setExpireJob(ExpireJob job) { _expireJob = job; }
}

View File

@ -0,0 +1,30 @@
package net.i2p.router.tunnel.pool;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.tunnel.TunnelCreatorConfig;
/**
 * Build a new tunnel to replace the existing one before it expires. This job
 * should be removed (or scheduled to run immediately) if the tunnel fails.
 *
 */
class RebuildJob extends JobImpl {
    private TunnelPool _pool;
    private Object _buildToken;
    private TunnelCreatorConfig _cfg;

    public RebuildJob(RouterContext ctx, TunnelCreatorConfig cfg, TunnelPool pool, Object buildToken) {
        super(ctx);
        _pool = pool;
        _cfg = cfg;
        _buildToken = buildToken;
        // fire at a random point within [expiration - 2*period,
        // expiration - period) so replacements aren't synchronized
        int period = pool.getSettings().getRebuildPeriod();
        long rebuildOn = cfg.getExpiration() - period;
        rebuildOn -= ctx.random().nextInt(period);
        getTiming().setStartAfter(rebuildOn);
    }

    public String getName() { return "Rebuild tunnel"; }
    public void runJob() {
        _pool.getBuilder().buildTunnel(getContext(), _pool, _buildToken);
    }
}

View File

@ -0,0 +1,360 @@
package net.i2p.router.tunnel.pool;
import java.util.HashSet;
import java.util.Set;
import net.i2p.data.Certificate;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.PublicKey;
import net.i2p.data.RouterInfo;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelCreateMessage;
import net.i2p.data.i2np.TunnelCreateStatusMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageSelector;
import net.i2p.router.RouterContext;
import net.i2p.router.ReplyJob;
import net.i2p.router.TunnelInfo;
import net.i2p.router.tunnel.TunnelCreatorConfig;
import net.i2p.router.tunnel.HopConfig;
import net.i2p.router.peermanager.TunnelHistory;
import net.i2p.util.Log;
/**
* queue up a job to request the endpoint to join the tunnel, which then
* requeues up another job for earlier hops, etc, until it reaches the
* gateway. after the gateway is confirmed, onCreated is fired.
*
*/
public class RequestTunnelJob extends JobImpl {
private Log _log;
private Job _onCreated;
private Job _onFailed;
private int _currentHop;
private RouterInfo _currentPeer;
private HopConfig _currentConfig;
private int _lookups;
private TunnelCreatorConfig _config;
private long _lastSendTime;
private boolean _isFake;
static final int HOP_REQUEST_TIMEOUT = 30*1000;
private static final int LOOKUP_TIMEOUT = 10*1000;
public RequestTunnelJob(RouterContext ctx, TunnelCreatorConfig cfg, Job onCreated, Job onFailed, int hop, boolean isFake) {
super(ctx);
_log = ctx.logManager().getLog(RequestTunnelJob.class);
_config = cfg;
_onCreated = onCreated;
_onFailed = onFailed;
_currentHop = hop;
_currentPeer = null;
_lookups = 0;
_lastSendTime = 0;
_isFake = isFake;
ctx.statManager().createRateStat("tunnel.receiveRejectionProbabalistic", "How often we are rejected probabalistically?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.receiveRejectionTransient", "How often we are rejected due to transient overload?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.receiveRejectionBandwidth", "How often we are rejected due to bandwidth overload?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.receiveRejectionCritical", "How often we are rejected due to critical failure?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.buildFailure", "How often we fail to build a tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.buildSuccess", "How often we succeed building a tunnel?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
if (_log.shouldLog(Log.DEBUG))
_log.debug("Requesting hop " + hop + " in " + cfg);
if (hop < 0)
throw new IllegalArgumentException("invalid endpoint hop [" + hop + "] cfg: " + cfg);
}
public String getName() { return "Request tunnel participation"; }
public void runJob() {
_currentConfig = _config.getConfig(_currentHop);
Hash peer = _config.getPeer(_currentHop);
if (getContext().routerHash().equals(peer)) {
requestSelf();
} else {
if (_currentPeer == null) {
_currentPeer = getContext().netDb().lookupRouterInfoLocally(peer);
if (_currentPeer == null) {
_lookups++;
if (_lookups > 1) {
peerFail(0);
return;
}
getContext().netDb().lookupRouterInfo(peer, this, this, LOOKUP_TIMEOUT);
return;
}
}
requestRemote(peer);
}
}
private void requestSelf() {
if (_config.isInbound()) {
// inbound tunnel, which means we are the first person asked, and if
// it is a zero hop tunnel, then we are also the last person asked
long id = getContext().random().nextLong(TunnelId.MAX_ID_VALUE);
_currentConfig.setReceiveTunnelId(DataHelper.toLong(4, id));
if (_config.getLength() > 1) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Requesting ourselves to join an inbound tunnel, receiving on "
+ _currentConfig.getReceiveTunnel() + ": " + _config);
// inbound tunnel with more than just ourselves
RequestTunnelJob req = new RequestTunnelJob(getContext(), _config, _onCreated,
_onFailed, _currentHop - 1, _isFake);
if (_isFake)
req.runJob();
else
getContext().jobQueue().addJob(req);
} else if (_onCreated != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Requesting ourselves to join a 0 hop inbound tunnel, receiving on "
+ _currentConfig.getReceiveTunnel() + ": " + _config);
// 0 hop inbound tunnel
if (_onCreated != null) {
if (_isFake)
_onCreated.runJob();
else
getContext().jobQueue().addJob(_onCreated);
}
getContext().statManager().addRateData("tunnel.buildSuccess", 1, 0);
}
} else {
// outbound tunnel, we're the gateway and hence the last person asked
if (_config.getLength() <= 1) {
// pick a random tunnelId which we "send" on
byte id[] = new byte[4];
getContext().random().nextBytes(id);
_config.getConfig(0).setSendTunnelId(id);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Requesting ourselves to join an outbound tunnel, sending on "
+ _config.getConfig(0).getSendTunnel() + ": " + _config);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Requesting ourselves to join an outbound tunnel, sending on "
+ _config.getConfig(1).getReceiveTunnel() + ": " + _config);
// send to whatever the first remote hop receives on
_config.getConfig(0).setSendTunnelId(_config.getConfig(1).getReceiveTunnelId());
if (_config.getConfig(0).getSendTunnelId() == null) {
_log.error("wtf, next hop: " + _config.getConfig(1)
+ " didn't give us a tunnel to send to, but they passed on to us?");
if (_onFailed != null) {
if (_isFake)
_onFailed.runJob();
else
getContext().jobQueue().addJob(_onFailed);
}
return;
}
}
// we are the outbound gateway, which is the last hop which is
// asked to participate in the tunnel. as such, fire off the
// onCreated immediately
if (_onCreated != null) {
if (_isFake)
_onCreated.runJob();
else
getContext().jobQueue().addJob(_onCreated);
getContext().statManager().addRateData("tunnel.buildSuccess", 1, 0);
}
}
}
private void requestRemote(Hash peer) {
HopConfig nextHop = (_config.getLength() > _currentHop + 1 ? _config.getConfig(_currentHop+1) : null);
Hash nextRouter = (nextHop != null ? _config.getPeer(_currentHop+1) : null);
TunnelId nextTunnel = (nextHop != null ? nextHop.getReceiveTunnel() : null);
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel();
if (replyTunnel == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("No inbound tunnels to build tunnels with!");
tunnelFail();
}
Hash replyGateway = replyTunnel.getPeer(0);
SessionKey replyKey = getContext().keyGenerator().generateSessionKey();
SessionTag replyTag = new SessionTag(true);
TunnelCreateMessage msg = new TunnelCreateMessage(getContext());
msg.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
msg.setIVKey(_currentConfig.getIVKey());
msg.setLayerKey(_currentConfig.getLayerKey());
msg.setNonce(getContext().random().nextLong(TunnelCreateMessage.MAX_NONCE_VALUE));
msg.setNextRouter(nextRouter);
msg.setNextTunnelId(nextTunnel);
msg.setReplyGateway(replyGateway);
msg.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
msg.setReplyKey(replyKey);
msg.setReplyTag(replyTag);
int duration = 10*60; // (int)((_config.getExpiration() - getContext().clock().now())/1000);
msg.setDurationSeconds(duration);
if (_currentHop == 0)
msg.setIsGateway(true);
else
msg.setIsGateway(false);
if (_log.shouldLog(Log.DEBUG))
_log.debug("** Send remote request to " + peer.toBase64().substring(0,4) + " using nonce "
+ msg.getNonce() + " with replies on " + replyTunnel);
// now make sure we will decrypt the reply properly
HashSet sessionTags = new HashSet(1);
sessionTags.add(replyTag);
getContext().sessionKeyManager().tagsReceived(replyKey, sessionTags);
HashSet sentTags = new HashSet();
SessionKey sentKey = new SessionKey();
ReplySelector selector = new ReplySelector(msg.getNonce());
ReplyJob onReply = new RequestReplyJob(getContext(), sentKey, sentTags);
Job onTimeout = new RequestTimeoutJob(getContext(), msg.getNonce());
Job j = new SendGarlicMessageJob(getContext(), msg, _currentPeer, selector, onReply, onTimeout, sentKey, sentTags);
getContext().jobQueue().addJob(j);
_lastSendTime = getContext().clock().now();
}
private void peerFail(int howBad) {
if (howBad > 0) {
switch (howBad) {
case TunnelHistory.TUNNEL_REJECT_CRIT:
getContext().statManager().addRateData("tunnel.receiveRejectionCritical", 1, 0);
break;
case TunnelHistory.TUNNEL_REJECT_BANDWIDTH:
getContext().statManager().addRateData("tunnel.receiveRejectionBandwidth", 1, 0);
break;
case TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD:
getContext().statManager().addRateData("tunnel.receiveRejectionTransient", 1, 0);
break;
case TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT:
getContext().statManager().addRateData("tunnel.receiveRejectionProbabalistic", 1, 0);
break;
default:
// ignore
}
// penalize peer based on their bitchiness level
getContext().profileManager().tunnelRejected(_currentPeer.getIdentity().calculateHash(),
getContext().clock().now() - _lastSendTime,
howBad);
}
if (_log.shouldLog(Log.INFO))
_log.info("Tunnel request failed w/ cause=" + howBad + " for peer "
+ _currentPeer.getIdentity().calculateHash().toBase64().substring(0,4));
tunnelFail();
}
private void tunnelFail() {
if (_log.shouldLog(Log.INFO))
_log.info("tunnel building failed: " + _config + " at hop " + _currentHop);
if (_onFailed != null)
getContext().jobQueue().addJob(_onFailed);
getContext().statManager().addRateData("tunnel.buildFailure", 1, 0);
}
private void peerSuccess() {
getContext().profileManager().tunnelJoined(_currentPeer.getIdentity().calculateHash(),
getContext().clock().now() - _lastSendTime);
if (_currentHop > 0) {
RequestTunnelJob j = new RequestTunnelJob(getContext(), _config, _onCreated, _onFailed, _currentHop - 1, _isFake);
getContext().jobQueue().addJob(j);
} else {
if (_onCreated != null)
getContext().jobQueue().addJob(_onCreated);
getContext().statManager().addRateData("tunnel.buildSuccess", 1, 0);
}
}
/**
 * Fired when the tunnel participant answers our create request.  On
 * success, wire up this hop's tunnel ids and continue toward the gateway;
 * otherwise record the rejection against the peer.
 */
private class RequestReplyJob extends JobImpl implements ReplyJob {
    private SessionKey _sentKey;
    private Set _sentTags;
    private TunnelCreateStatusMessage _reply;

    public RequestReplyJob(RouterContext ctx, SessionKey sentKey, Set sentTags) {
        super(ctx);
        _sentKey = sentKey;
        _sentTags = sentTags;
    }

    public String getName() { return "handle tunnel request reply"; }

    public void runJob() {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("reply received: " + _config + " at hop " + _currentHop + ": " + _reply.getStatus());

        // the peer processed our garlic, so any session tags we bundled
        // alongside it are now usable
        if (!_sentTags.isEmpty()) {
            PublicKey target = _currentPeer.getIdentity().getPublicKey();
            getContext().sessionKeyManager().tagsDelivered(target, _sentKey, _sentTags);
        }

        if (_reply.getStatus() != TunnelCreateStatusMessage.STATUS_SUCCESS) {
            peerFail(_reply.getStatus());
            return;
        }
        _currentConfig.setReceiveTunnelId(_reply.getReceiveTunnelId());
        if (_currentHop >= 1) {
            // the hop closer to the gateway sends into the id this hop receives on
            _config.getConfig(_currentHop-1).setSendTunnelId(_currentConfig.getReceiveTunnelId());
        }
        peerSuccess();
    }

    public void setMessage(I2NPMessage message) { _reply = (TunnelCreateStatusMessage)message; }
}
/** Fired when the peer never answers our tunnel create request in time. */
private class RequestTimeoutJob extends JobImpl {
    private long _nonce;

    public RequestTimeoutJob(RouterContext ctx, long nonce) {
        super(ctx);
        _nonce = nonce;
    }

    public String getName() { return "tunnel request timeout"; }

    public void runJob() {
        if (_log.shouldLog(Log.WARN)) {
            _log.warn("request timeout: " + _config + " at hop " + _currentHop
                      + " with nonce " + _nonce);
        }
        // 0 == no explicit rejection code: the peer simply never replied
        peerFail(0);
    }
}
/**
 * Matches the TunnelCreateStatusMessage carrying our request nonce,
 * continuing until either it is found or the per-hop window expires.
 */
private class ReplySelector implements MessageSelector {
    private long _nonce;
    private boolean _nonceFound;
    private long _expiration;

    public ReplySelector(long nonce) {
        _nonce = nonce;
        _nonceFound = false;
        _expiration = getContext().clock().now() + HOP_REQUEST_TIMEOUT;
    }

    public boolean continueMatching() {
        // stop once matched, or once the request window has closed
        if (_nonceFound)
            return false;
        return getContext().clock().now() < _expiration;
    }

    public long getExpiration() { return _expiration; }

    public boolean isMatch(I2NPMessage message) {
        if (!(message instanceof TunnelCreateStatusMessage))
            return false;
        if (((TunnelCreateStatusMessage)message).getNonce() != _nonce)
            return false;
        _nonceFound = true;
        return true;
    }

    public String toString() {
        StringBuffer buf = new StringBuffer(64);
        buf.append("request ");
        buf.append(_currentPeer.getIdentity().calculateHash().toBase64().substring(0,4));
        buf.append(" to join ").append(_config);
        buf.append(" (request expired ");
        buf.append(DataHelper.formatDuration(_expiration-getContext().clock().now()));
        buf.append(" ago)");
        return buf.toString();
    }
}
}

View File

@ -0,0 +1,88 @@
package net.i2p.router.tunnel.pool;
import java.util.HashSet;
import java.util.Set;
import net.i2p.data.Certificate;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.MessageSelector;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.message.GarlicMessageBuilder;
import net.i2p.router.message.PayloadGarlicConfig;
import net.i2p.util.Log;
/**
* Wrap the tunnel request in a garlic to the participant, and then send it out
* a tunnel.
*
*/
class SendGarlicMessageJob extends JobImpl {
    private Log _log;
    /** the tunnel create request to wrap and deliver */
    private I2NPMessage _payload;
    /** router that should receive and unwrap the garlic */
    private RouterInfo _target;
    private MessageSelector _replySelector;
    private ReplyJob _onReply;
    private Job _onTimeout;
    /** key used to encrypt the garlic; exposed so a reply can credit any delivered tags */
    private SessionKey _sentKey;
    private Set _sentTags;

    /** give the reply the same window as the per-hop tunnel request */
    private static final int TIMEOUT = RequestTunnelJob.HOP_REQUEST_TIMEOUT;

    public SendGarlicMessageJob(RouterContext ctx, I2NPMessage payload, RouterInfo target, MessageSelector selector, ReplyJob onReply, Job onTimeout, SessionKey sentKey, Set sentTags) {
        super(ctx);
        _log = ctx.logManager().getLog(SendGarlicMessageJob.class);
        _payload = payload;
        _target = target;
        _replySelector = selector;
        _onReply = onReply;
        _onTimeout = onTimeout;
        _sentKey = sentKey;
        _sentTags = sentTags;
    }

    public String getName() { return "build and send request garlic"; }

    public void runJob() {
        // have the recipient unwrap the garlic locally (it is the participant)
        DeliveryInstructions instructions = new DeliveryInstructions();
        instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);

        PayloadGarlicConfig payload = new PayloadGarlicConfig();
        payload.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
        payload.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
        payload.setPayload(_payload);
        payload.setRecipient(_target);
        payload.setDeliveryInstructions(instructions);
        payload.setRequestAck(false);
        payload.setExpiration(getContext().clock().now() + RequestTunnelJob.HOP_REQUEST_TIMEOUT);

        GarlicMessage msg = GarlicMessageBuilder.buildMessage(getContext(), payload, _sentKey, _sentTags);

        // so we will look for the reply
        OutNetMessage dummyMessage = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout, TIMEOUT);

        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Scheduling timeout job (" + _onTimeout + ") to be run in " + TIMEOUT + "ms");

        // now find an outbound tunnel and send 'er off
        TunnelInfo out = getContext().tunnelManager().selectOutboundTunnel();
        if (out == null) {
            // no outbound tunnel available: fail the request and bail.
            // (previously this fell through and dereferenced the null
            // tunnel below, throwing an NPE)
            if (_onTimeout != null)
                getContext().jobQueue().addJob(_onTimeout);
            getContext().messageRegistry().unregisterPending(dummyMessage);
            return;
        }
        TunnelId outId = out.getSendTunnelId(0);
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Dispatching the garlic request out " + outId + " targetting " + _target.getIdentity().calculateHash().toBase64().substring(0,4));
        getContext().tunnelDispatcher().dispatchOutbound(msg, outId, _target.getIdentity().calculateHash());
    }
}

View File

@ -0,0 +1,206 @@
package net.i2p.router.tunnel.pool;
import java.util.Date;
import java.util.HashSet;
import java.util.Set;
import net.i2p.data.Certificate;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageSelector;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.message.PayloadGarlicConfig;
import net.i2p.router.message.GarlicMessageBuilder;
import net.i2p.router.tunnel.TunnelCreatorConfig;
import net.i2p.util.Log;
/**
 * Periodically test a pooled tunnel by garlic-routing a DeliveryStatusMessage
 * to ourselves through it (paired with an exploratory tunnel in the other
 * direction), failing the tunnel if no reply arrives in time.
 */
class TestJob extends JobImpl {
    private Log _log;
    private TunnelPool _pool;
    private Object _buildToken;
    /** the tunnel under test */
    private PooledTunnelCreatorConfig _cfg;

    /** base to randomize the test delay on */
    private static final int TEST_DELAY = 60*1000;

    public TestJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, TunnelPool pool, Object buildToken) {
        super(ctx);
        _log = ctx.logManager().getLog(TestJob.class);
        _pool = pool;
        _cfg = cfg;
        _buildToken = buildToken;
        getTiming().setStartAfter(getDelay() + ctx.clock().now());
        ctx.statManager().createRateStat("tunnel.testFailedTime", "How long did the failure take (max of 60s for full timeout)?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.testSuccessLength", "How long were the tunnels that passed the test?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.testSuccessTime", "How long did tunnel testing take?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.testAborted", "Tunnel test could not occur, since there weren't any tunnels to test with", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
    }

    public String getName() { return "Test tunnel"; }

    public void runJob() {
        // note: testing with exploratory tunnels always, even if the tested tunnel
        // is a client tunnel (per _cfg.getDestination())
        TunnelInfo replyTunnel = null;
        TunnelInfo outTunnel = null;
        if (_cfg.isInbound()) {
            replyTunnel = _cfg;
            outTunnel = getContext().tunnelManager().selectOutboundTunnel();
        } else {
            replyTunnel = getContext().tunnelManager().selectInboundTunnel();
            outTunnel = _cfg;
        }

        if ( (replyTunnel == null) || (outTunnel == null) ) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Insufficient tunnels to test " + _cfg + " with: " + replyTunnel + " / " + outTunnel);
            getContext().statManager().addRateData("tunnel.testAborted", _cfg.getLength(), 0);
            scheduleRetest();
        } else {
            int testPeriod = getTestPeriod();
            long testExpiration = getContext().clock().now() + testPeriod;
            DeliveryStatusMessage m = new DeliveryStatusMessage(getContext());
            m.setArrival(getContext().clock().now());
            m.setMessageExpiration(testExpiration+2*testPeriod);
            m.setMessageId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
            // keep an eye out for the message even after we fail the tunnel for another 40s
            ReplySelector sel = new ReplySelector(getContext(), m.getMessageId(), testExpiration + 2*testPeriod);
            OnTestReply onReply = new OnTestReply(getContext());
            OnTestTimeout onTimeout = new OnTestTimeout(getContext());
            getContext().messageRegistry().registerPending(sel, onReply, onTimeout, 3*testPeriod);
            sendTest(m, outTunnel, replyTunnel);
        }
    }

    /** Send the test message out outTunnel, to come back via replyTunnel. */
    private void sendTest(I2NPMessage m, TunnelInfo outTunnel, TunnelInfo replyTunnel) {
        if (false) {
            getContext().tunnelDispatcher().dispatchOutbound(m, outTunnel.getSendTunnelId(0),
                                                             replyTunnel.getReceiveTunnelId(0),
                                                             replyTunnel.getPeer(0));
        } else {
            // garlic route that DeliveryStatusMessage to ourselves so the endpoints and gateways
            // can't tell its a test. to simplify this, we encrypt it with a random key and tag,
            // remembering that key+tag so that we can decrypt it later. this means we can do the
            // garlic encryption without any ElGamal (yay)
            DeliveryInstructions instructions = new DeliveryInstructions();
            instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);

            PayloadGarlicConfig payload = new PayloadGarlicConfig();
            payload.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
            payload.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
            payload.setPayload(m);
            payload.setRecipient(getContext().router().getRouterInfo());
            payload.setDeliveryInstructions(instructions);
            payload.setRequestAck(false);
            payload.setExpiration(m.getMessageExpiration());

            SessionKey encryptKey = getContext().keyGenerator().generateSessionKey();
            SessionTag encryptTag = new SessionTag(true);
            SessionKey sentKey = new SessionKey();
            Set sentTags = null;
            GarlicMessage msg = GarlicMessageBuilder.buildMessage(getContext(), payload, sentKey, sentTags,
                                                                  getContext().keyManager().getPublicKey(),
                                                                  encryptKey, encryptTag);

            // remember the key+tag so we can decrypt the reply without ElGamal
            Set encryptTags = new HashSet(1);
            encryptTags.add(encryptTag);
            getContext().sessionKeyManager().tagsReceived(encryptKey, encryptTags);

            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Sending garlic test of " + outTunnel + " / " + replyTunnel);
            getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0),
                                                             replyTunnel.getReceiveTunnelId(0),
                                                             replyTunnel.getPeer(0));
        }
    }

    public void testSuccessful(int ms) {
        getContext().statManager().addRateData("tunnel.testSuccessLength", _cfg.getLength(), 0);
        getContext().statManager().addRateData("tunnel.testSuccessTime", ms, 0);
        scheduleRetest();
    }

    private void testFailed(long timeToFail) {
        getContext().statManager().addRateData("tunnel.testFailedTime", timeToFail, timeToFail);
        _cfg.tunnelFailed();
        if (_log.shouldLog(Log.WARN))
            _log.warn("Tunnel test failed in " + timeToFail + "ms: " + _cfg);
    }

    /** randomized time we should wait before testing */
    private int getDelay() { return TEST_DELAY + getContext().random().nextInt(TEST_DELAY); }
    /** how long we allow tests to run for before failing them */
    private int getTestPeriod() { return 20*1000; }

    private void scheduleRetest() {
        int delay = getDelay();
        // only retest if the tunnel will still be alive when the retest fires
        if (_cfg.getExpiration() > getContext().clock().now() + delay)
            requeue(delay);
    }

    /** Matches the DeliveryStatusMessage carrying our test id. */
    private class ReplySelector implements MessageSelector {
        private RouterContext _context;
        private long _id;
        private long _expiration;
        private boolean _found;

        public ReplySelector(RouterContext ctx, long id, long expiration) {
            _context = ctx;
            _id = id;
            _expiration = expiration;
            _found = false;
        }

        // FIX: this previously returned (_found && ...) with _found never
        // set, so the selector stopped matching immediately and no test
        // reply could ever be received.  Match until found or expired.
        public boolean continueMatching() { return (!_found) && (_context.clock().now() < _expiration); }
        public long getExpiration() { return _expiration; }
        public boolean isMatch(I2NPMessage message) {
            if (message instanceof DeliveryStatusMessage) {
                if (((DeliveryStatusMessage)message).getMessageId() == _id) {
                    _found = true;
                    return true;
                }
            }
            return false;
        }
    }

    /**
     * Test successfull (w00t)
     */
    private class OnTestReply extends JobImpl implements ReplyJob {
        private long _successTime;
        public OnTestReply(RouterContext ctx) { super(ctx); }
        public String getName() { return "Tunnel test success"; }
        public void runJob() {
            // a reply that took longer than the test period still counts as failure
            if (_successTime < getTestPeriod())
                testSuccessful((int)_successTime);
            else
                testFailed(_successTime);
        }
        // who cares about the details...
        public void setMessage(I2NPMessage message) {
            _successTime = getContext().clock().now() - ((DeliveryStatusMessage)message).getArrival();
        }
    }

    /**
     * Test failed (boo, hiss)
     */
    private class OnTestTimeout extends JobImpl {
        private long _started;
        public OnTestTimeout(RouterContext ctx) {
            super(ctx);
            _started = ctx.clock().now();
        }
        public String getName() { return "Tunnel test timeout"; }
        public void runJob() {
            testFailed(getContext().clock().now() - _started);
        }
    }
}

View File

@ -0,0 +1,122 @@
package net.i2p.router.tunnel.pool;
import java.util.ArrayList;
import java.util.List;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
import net.i2p.router.RouterContext;
import net.i2p.router.JobImpl;
import net.i2p.router.tunnel.HopConfig;
import net.i2p.router.tunnel.TunnelCreatorConfig;
import net.i2p.router.tunnel.TunnelGateway;
import net.i2p.router.TunnelPoolSettings;
import net.i2p.util.Log;
/**
*
*/
public class TunnelBuilder {
    /**
     * Build a new tunnel per the pool's wishes (using the preferred length,
     * peers, ordering, etc). After the tunnel is built, it is added to the
     * pool as well as the dispatcher, and the necessary test and maintenance
     * jobs are built. This call does not block.
     *
     */
    public void buildTunnel(RouterContext ctx, TunnelPool pool, Object poolToken) {
        buildTunnel(ctx, pool, false, poolToken);
    }

    /**
     * @param fake if true, build a zero-hop tunnel through ourselves (and do
     *             the request inline rather than queueing it)
     */
    public void buildTunnel(RouterContext ctx, TunnelPool pool, boolean fake, Object poolToken) {
        if (!pool.keepBuilding(poolToken))
            return;

        // this is probably overkill (ya think?)
        pool.refreshSettings();

        PooledTunnelCreatorConfig cfg = configTunnel(ctx, pool, fake);
        if (cfg == null) {
            if (!fake) {
                // couldn't pick peers right now - try again shortly
                RetryJob j = new RetryJob(ctx, pool, poolToken);
                j.getTiming().setStartAfter(ctx.clock().now() + ctx.random().nextInt(30*1000));
                ctx.jobQueue().addJob(j);
            }
            // with no config there is nothing to request.  (previously the
            // fake case fell through here and NPE'd on cfg.getLength())
            return;
        }
        OnCreatedJob onCreated = new OnCreatedJob(ctx, pool, cfg, fake, poolToken);
        RetryJob onFailed = (fake ? null : new RetryJob(ctx, pool, poolToken));
        // queue up a job to request the endpoint to join the tunnel, which then
        // requeues up another job for earlier hops, etc, until it reaches the
        // gateway. after the gateway is confirmed, onCreated is fired
        RequestTunnelJob req = new RequestTunnelJob(ctx, cfg, onCreated, onFailed, cfg.getLength()-1, fake);
        if (fake) // lets get it done inline, as we /need/ it asap
            req.runJob();
        else
            ctx.jobQueue().addJob(req);
    }

    /**
     * Pick the peers and per-hop keys for a new tunnel.
     *
     * @return the configured tunnel, or null if no peers could be selected
     */
    private PooledTunnelCreatorConfig configTunnel(RouterContext ctx, TunnelPool pool, boolean fake) {
        Log log = ctx.logManager().getLog(TunnelBuilder.class);
        TunnelPoolSettings settings = pool.getSettings();
        long expiration = ctx.clock().now() + settings.getDuration();
        List peers = null;

        if (fake) {
            // zero hop: we are the gateway and the endpoint
            peers = new ArrayList(1);
            peers.add(ctx.routerHash());
        } else {
            peers = pool.getSelector().selectPeers(ctx, settings);
        }

        if ( (peers == null) || (peers.size() <= 0) ) {
            // no inbound or outbound tunnels to send the request through, and
            // the pool is refusing 0 hop tunnels
            if (peers == null) {
                if (log.shouldLog(Log.ERROR))
                    log.error("No peers to put in the new tunnel! selectPeers returned null! boo, hiss! fake=" + fake);
            } else {
                if (log.shouldLog(Log.ERROR))
                    log.error("No peers to put in the new tunnel! selectPeers returned an empty list?! fake=" + fake);
            }
            return null;
        }

        PooledTunnelCreatorConfig cfg = new PooledTunnelCreatorConfig(ctx, peers.size(), settings.isInbound(), settings.getDestination());
        // peers[] is ordered endpoint first, but cfg.getPeer() is ordered gateway first
        for (int i = 0; i < peers.size(); i++) {
            int j = peers.size() - 1 - i;
            cfg.setPeer(j, (Hash)peers.get(i));
            HopConfig hop = cfg.getConfig(j);
            hop.setExpiration(expiration);
            hop.setIVKey(ctx.keyGenerator().generateSessionKey());
            hop.setLayerKey(ctx.keyGenerator().generateSessionKey());
            // tunnelIds will be updated during building, and as the creator, we
            // don't need to worry about prev/next hop
        }
        cfg.setExpiration(expiration);

        // reuse the logger fetched above rather than looking it up a second time
        if (log.shouldLog(Log.DEBUG))
            log.debug("Config contains " + peers + ": " + cfg);
        return cfg;
    }

    /**
     * If the building fails, try, try again.
     *
     */
    private class RetryJob extends JobImpl {
        private TunnelPool _pool;
        private Object _buildToken;
        public RetryJob(RouterContext ctx, TunnelPool pool, Object buildToken) {
            super(ctx);
            _pool = pool;
            _buildToken = buildToken;
        }
        public String getName() { return "tunnel create failed"; }
        public void runJob() {
            // yikes, nothing left, lets get some backup (if we're allowed)
            if ( (_pool.selectTunnel() == null) && (_pool.getSettings().getAllowZeroHop()) )
                _pool.buildFake();
            buildTunnel(getContext(), _pool, _buildToken);
        }
    }
}

View File

@ -0,0 +1,48 @@
package net.i2p.router.tunnel.pool;
import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.data.i2np.TunnelDataMessage;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
/**
*
*/
/**
 * Builds jobs that hand received tunnel messages (gateway or data) off to
 * the tunnel dispatcher.
 */
public class TunnelMessageHandlerBuilder implements HandlerJobBuilder {
    private RouterContext _context;

    public TunnelMessageHandlerBuilder(RouterContext ctx) {
        _context = ctx;
    }

    public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
        // prefer the explicit hash; fall back on deriving it from the identity
        Hash sender = fromHash;
        if ( (sender == null) && (from != null) )
            sender = from.calculateHash();
        return new HandleJob(_context, receivedMessage, sender);
    }

    /** Routes a single message to the appropriate dispatcher entry point. */
    private class HandleJob extends JobImpl {
        private I2NPMessage _msg;
        private Hash _from;

        public HandleJob(RouterContext ctx, I2NPMessage msg, Hash from) {
            super(ctx);
            _msg = msg;
            _from = from;
        }

        public String getName() { return "Dispatch tunnel message"; }

        public void runJob() {
            if (_msg instanceof TunnelGatewayMessage) {
                getContext().tunnelDispatcher().dispatch((TunnelGatewayMessage)_msg);
            } else if (_msg instanceof TunnelDataMessage) {
                getContext().tunnelDispatcher().dispatch((TunnelDataMessage)_msg, _from);
            }
        }
    }
}

View File

@ -0,0 +1,53 @@
package net.i2p.router.tunnel.pool;
import java.util.List;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelPoolSettings;
import net.i2p.util.Log;
/**
* Coordinate the selection of peers to go into a tunnel for one particular
* pool.
*/
/**
 * Coordinate the selection of peers to go into a tunnel for one particular
 * pool.
 */
abstract class TunnelPeerSelector {
    /**
     * Which peers should go into the next tunnel for the given settings?
     *
     * @return ordered list of Hash objects (one per peer) specifying what order
     *         they should appear in a tunnel (endpoint first). This includes
     *         the local router in the list. If there are no tunnels or peers
     *         to build through, and the settings reject 0 hop tunnels, this will
     *         return null.
     */
    public abstract List selectPeers(RouterContext ctx, TunnelPoolSettings settings);

    /**
     * Pick the hop count for the next tunnel, applying the configured
     * random variance.
     *
     * @return the (non-negative) hop count, or 0 when there are no tunnels
     *         to build through but zero hop is allowed, or -1 when there are
     *         none and zero hop is forbidden
     */
    protected int getLength(RouterContext ctx, TunnelPoolSettings settings) {
        int length = settings.getLength();
        int variance = settings.getLengthVariance();
        if (variance != 0) {
            if (variance > 0) {
                // positive variance only ever lengthens: [0, variance)
                length += ctx.random().nextInt(variance);
            } else {
                // negative variance skews both ways: [-|v|, |v|)
                int bound = 0 - variance;
                length += ctx.random().nextInt(2*bound) - bound;
            }
            if (length < 0)
                length = 0;
        }
        if ( (ctx.tunnelManager().getOutboundTunnelCount() <= 0) ||
             (ctx.tunnelManager().getFreeTunnelCount() <= 0) ) {
            Log log = ctx.logManager().getLog(TunnelPeerSelector.class);
            // no tunnels to build tunnels with
            if (settings.getAllowZeroHop()) {
                if (log.shouldLog(Log.INFO))
                    log.info("no outbound tunnels or free inbound tunnels, but we do allow zeroHop: " + settings);
                return 0;
            }
            if (log.shouldLog(Log.WARN))
                log.warn("no outbound tunnels or free inbound tunnels, and we dont allow zeroHop: " + settings);
            return -1;
        }
        return length;
    }
}

View File

@ -0,0 +1,418 @@
package net.i2p.router.tunnel.pool;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import net.i2p.data.Hash;
import net.i2p.data.Lease;
import net.i2p.data.LeaseSet;
import net.i2p.data.TunnelId;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelPoolSettings;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
*
*/
public class TunnelPool {
    private RouterContext _context;
    private Log _log;
    // pool configuration; may be replaced wholesale via setSettings()
    private TunnelPoolSettings _settings;
    // TunnelInfo instances currently in the pool; always synchronized on itself
    private ArrayList _tunnels;
    private TunnelPeerSelector _peerSelector;
    private TunnelBuilder _builder;
    private TunnelPoolManager _manager;
    // true between startup() and shutdown()
    private boolean _alive;
    // running total of messages processed by tunnels that have left the pool
    private long _lifetimeProcessed;
    /**
     * list of pool tokens (Object) passed around during building/rebuilding/etc.
     * if/when the token is removed from this list, that sequence of building/rebuilding/etc
     * should cease (though others may continue).
     *
     */
    private List _tokens;

    public TunnelPool(RouterContext ctx, TunnelPoolManager mgr, TunnelPoolSettings settings, TunnelPeerSelector sel, TunnelBuilder builder) {
        _context = ctx;
        _log = ctx.logManager().getLog(TunnelPool.class);
        _manager = mgr;
        _settings = settings;
        _tunnels = new ArrayList(settings.getLength() + settings.getBackupQuantity());
        _peerSelector = sel;
        _builder = builder;
        _tokens = new ArrayList(settings.getBackupQuantity() + settings.getQuantity());
        _alive = false;
        _lifetimeProcessed = 0;
        refreshSettings();
    }

    /** Mark the pool alive, kick off builders, and publish a leaseSet if needed. */
    public void startup() {
        _alive = true;
        int added = refreshBuilders();
        if (added <= 0) {
            // we just reconnected and didn't require any new tunnel builders.
            // however, we /do/ want a leaseSet, so build one
            LeaseSet ls = null;
            synchronized (_tunnels) {
                if (_settings.isInbound() && (_settings.getDestination() != null) )
                    ls = locked_buildNewLeaseSet();
            }
            if (ls != null)
                _context.clientManager().requestLeaseSet(_settings.getDestination(), ls);
        }
    }

    /** Stop all build sequences by clearing their tokens. */
    public void shutdown() {
        _alive = false;
        synchronized (_tokens) { _tokens.clear(); }
    }

    /**
     * Reconcile the number of live build tokens with the configured quantity,
     * launching one build per newly created token.
     *
     * @return number of new build sequences started
     */
    private int refreshBuilders() {
        // only start up new build tasks if we need more of 'em
        int target = _settings.getQuantity() + _settings.getBackupQuantity();
        int oldTokenCount = 0;
        List newTokens = null;
        synchronized (_tokens) {
            oldTokenCount = _tokens.size();
            // too many: retire the oldest sequences
            while (_tokens.size() > target)
                _tokens.remove(0);
            if (_tokens.size() < target) {
                int wanted = target - _tokens.size();
                newTokens = new ArrayList(wanted);
                for (int i = 0; i < wanted; i++) {
                    Object token = new Object();
                    newTokens.add(token);
                    _tokens.add(token);
                }
            }
        }
        if (newTokens != null) {
            if (_log.shouldLog(Log.INFO))
                _log.info(toString() + ": refreshing builders, previously had " + oldTokenCount
                          + ", want a total of " + target + ", creating "
                          + newTokens.size() + " new ones.");
            // launch builds outside the _tokens lock
            for (int i = 0; i < newTokens.size(); i++) {
                Object token = newTokens.get(i);
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug(toString() + ": Building a tunnel with the token " + token);
                _builder.buildTunnel(_context, this, token);
            }
            return newTokens.size();
        } else {
            return 0;
        }
    }

    /** do we still need this sequence of build/rebuild/etc to continue? */
    public boolean keepBuilding(Object token) {
        boolean connected = true;
        boolean rv = false;
        int remaining = 0;
        int wanted = _settings.getQuantity() + _settings.getBackupQuantity();
        // client pools die with their client
        if ( (_settings.getDestination() != null) && (!_context.clientManager().isLocal(_settings.getDestination())) )
            connected = false;
        synchronized (_tokens) {
            if (!connected) {
                // client disconnected, so stop rebuilding this series
                _tokens.remove(token);
                rv = false;
            } else {
                rv = _tokens.contains(token);
            }
            remaining = _tokens.size();
        }
        if (remaining <= 0) {
            // no sequences left at all: tear down the whole pool
            _manager.removeTunnels(_settings.getDestination());
        }
        if (!rv) {
            if (_log.shouldLog(Log.INFO))
                _log.info(toString() + ": keepBuilding does NOT want building to continue (want "
                          + wanted + ", have " + remaining);
        }
        return rv;
    }

    /** Re-read exploratory settings from the router config (client settings win). */
    void refreshSettings() {
        if (_settings.getDestination() != null) {
            return; // don't override client specified settings
        } else {
            if (_settings.isExploratory()) {
                Properties props = _context.router().getConfigMap();
                if (_settings.isInbound())
                    _settings.readFromProperties(TunnelPoolSettings.PREFIX_INBOUND_EXPLORATORY, props);
                else
                    _settings.readFromProperties(TunnelPoolSettings.PREFIX_OUTBOUND_EXPLORATORY, props);
            }
        }
    }

    /**
     * Pull a random tunnel out of the pool. If there are none available but
     * the pool is configured to allow 0hop tunnels, this builds a fake one
     * and returns it.
     *
     */
    public TunnelInfo selectTunnel() { return selectTunnel(true); }
    // allowRecurseOnFail bounds the retry to a single buildFake() attempt
    private TunnelInfo selectTunnel(boolean allowRecurseOnFail) {
        synchronized (_tunnels) {
            if (_tunnels.size() <= 0) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn(toString() + ": No tunnels to select from");
            } else {
                // pick 'em randomly
                Collections.shuffle(_tunnels, _context.random());
                for (int i = 0; i < _tunnels.size(); i++) {
                    TunnelInfo info = (TunnelInfo)_tunnels.get(i);
                    if (info.getExpiration() > _context.clock().now()) {
                        //_log.debug("Selecting tunnel: " + info + " - " + _tunnels);
                        return info;
                    }
                }
                if (_log.shouldLog(Log.WARN))
                    _log.warn(toString() + ": after " + _tunnels.size() + " tries, no unexpired ones were found: " + _tunnels);
            }
        }
        // nothing usable: synchronously build a zero-hop fallback if allowed
        if (_alive && _settings.getAllowZeroHop())
            buildFake();
        if (allowRecurseOnFail)
            return selectTunnel(false);
        else
            return null;
    }

    /**
     * Look up a pooled tunnel by its gateway-side tunnel id
     * (receive id for inbound pools, send id for outbound pools).
     *
     * @return the matching tunnel, or null if none
     */
    public TunnelInfo getTunnel(TunnelId gatewayId) {
        synchronized (_tunnels) {
            for (int i = 0; i < _tunnels.size(); i++) {
                TunnelInfo info = (TunnelInfo)_tunnels.get(i);
                if (_settings.isInbound()) {
                    if (info.getReceiveTunnelId(0).equals(gatewayId))
                        return info;
                } else {
                    if (info.getSendTunnelId(0).equals(gatewayId))
                        return info;
                }
            }
        }
        return null;
    }

    /**
     * Return a list of tunnels in the pool
     *
     * @return list of TunnelInfo objects
     */
    public List listTunnels() {
        synchronized (_tunnels) {
            // defensive copy so callers can iterate without holding the lock
            return new ArrayList(_tunnels);
        }
    }

    public TunnelBuilder getBuilder() { return _builder; }
    public TunnelPoolSettings getSettings() { return _settings; }
    public void setSettings(TunnelPoolSettings settings) {
        _settings = settings;
        if (_settings != null) {
            if (_log.shouldLog(Log.INFO))
                _log.info(toString() + ": Settings updated on the pool: " + settings);
            refreshBuilders(); // to start/stop new sequences, in case the quantities changed
        }
    }
    public TunnelPeerSelector getSelector() { return _peerSelector; }
    public boolean isAlive() { return _alive; }
    public int size() {
        synchronized (_tunnels) {
            return _tunnels.size();
        }
    }

    /** Add a newly built tunnel, republishing the leaseSet for inbound client pools. */
    public void addTunnel(TunnelInfo info) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(toString() + ": Adding tunnel " + info);
        LeaseSet ls = null;
        synchronized (_tunnels) {
            _tunnels.add(info);
            if (_settings.isInbound() && (_settings.getDestination() != null) )
                ls = locked_buildNewLeaseSet();
        }
        if (ls != null)
            _context.clientManager().requestLeaseSet(_settings.getDestination(), ls);
    }

    /** Drop an expired tunnel, rebuilding the leaseSet and topping up builders. */
    public void removeTunnel(TunnelInfo info) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(toString() + ": Removing tunnel " + info);
        int remaining = 0;
        LeaseSet ls = null;
        synchronized (_tunnels) {
            _tunnels.remove(info);
            if (_settings.isInbound() && (_settings.getDestination() != null) )
                ls = locked_buildNewLeaseSet();
            remaining = _tunnels.size();
        }
        _lifetimeProcessed += info.getProcessedMessagesCount();
        if (_settings.isInbound() && (_settings.getDestination() != null) ) {
            if (ls != null) {
                _context.clientManager().requestLeaseSet(_settings.getDestination(), ls);
            } else {
                if (_log.shouldLog(Log.WARN))
                    _log.warn(toString() + ": unable to build a new leaseSet on removal (" + remaining
                              + " remaining), request a new tunnel");
                if (_settings.getAllowZeroHop())
                    buildFake();
            }
        }
        refreshBuilders();
    }

    /** Drop a tunnel that failed testing; like removeTunnel but always builds a fallback. */
    public void tunnelFailed(PooledTunnelCreatorConfig cfg) {
        if (_log.shouldLog(Log.ERROR))
            _log.error(toString() + ": Tunnel failed: " + cfg, new Exception("failure cause"));
        int remaining = 0;
        LeaseSet ls = null;
        synchronized (_tunnels) {
            _tunnels.remove(cfg);
            if (_settings.isInbound() && (_settings.getDestination() != null) )
                ls = locked_buildNewLeaseSet();
            remaining = _tunnels.size();
        }
        _lifetimeProcessed += cfg.getProcessedMessagesCount();
        if (_settings.isInbound() && (_settings.getDestination() != null) ) {
            if (ls != null) {
                _context.clientManager().requestLeaseSet(_settings.getDestination(), ls);
            } else {
                if (_log.shouldLog(Log.WARN))
                    _log.warn(toString() + ": unable to build a new leaseSet on failure (" + remaining
                              + " remaining), request a new tunnel");
                // NOTE: unlike removeTunnel, this builds even when zero hop is disallowed
                buildFake(false);
            }
        }
        refreshBuilders();
    }

    /** Republish the leaseSet for inbound client pools (e.g. as leases near expiry). */
    void refreshLeaseSet() {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(toString() + ": refreshing leaseSet on tunnel expiration (but prior to grace timeout)");
        int remaining = 0;
        LeaseSet ls = null;
        if (_settings.isInbound() && (_settings.getDestination() != null) ) {
            synchronized (_tunnels) {
                ls = locked_buildNewLeaseSet();
                remaining = _tunnels.size();
            }
            if (ls != null) {
                _context.clientManager().requestLeaseSet(_settings.getDestination(), ls);
            } else {
                if (_log.shouldLog(Log.WARN))
                    _log.warn(toString() + ": unable to build a new leaseSet on expire (" + remaining
                              + " remaining), request a new tunnel");
                if (_settings.getAllowZeroHop())
                    buildFake();
            }
        }
    }

    /** Synchronously build a fallback tunnel (zero hop by default). */
    void buildFake() { buildFake(true); }
    void buildFake(boolean zeroHop) {
        if (_log.shouldLog(Log.INFO))
            _log.info(toString() + ": building a fake tunnel (allow zeroHop? " + zeroHop + ")");
        // temporary token so keepBuilding() lets this one-shot build proceed
        Object tempToken = new Object();
        synchronized (_tokens) {
            _tokens.add(tempToken);
        }
        _builder.buildTunnel(_context, this, zeroHop, tempToken);
        synchronized (_tokens) {
            _tokens.remove(tempToken);
        }
    }

    /**
     * Build a leaseSet with all of the tunnels that aren't about to expire
     *
     */
    // caller must hold the _tunnels lock; returns null if fewer than
    // getQuantity() usable leases exist
    private LeaseSet locked_buildNewLeaseSet() {
        long expireAfter = _context.clock().now() + _settings.getRebuildPeriod();

        LeaseSet ls = new LeaseSet();
        for (int i = 0; i < _tunnels.size(); i++) {
            TunnelInfo tunnel = (TunnelInfo)_tunnels.get(i);
            if (tunnel.getExpiration() <= expireAfter)
                continue; // expires too soon, skip it

            TunnelId inId = tunnel.getReceiveTunnelId(0);
            Hash gw = tunnel.getPeer(0);
            if ( (inId == null) || (gw == null) ) {
                _log.error(toString() + ": wtf, tunnel has no inbound gateway/tunnelId? " + tunnel);
                continue;
            }
            Lease lease = new Lease();
            lease.setEndDate(new Date(tunnel.getExpiration()));
            lease.setTunnelId(inId);
            lease.setGateway(gw);
            ls.addLease(lease);
        }

        int wanted = _settings.getQuantity();

        if (ls.getLeaseCount() < wanted) {
            if (_log.shouldLog(Log.WARN))
                _log.warn(toString() + ": Not enough leases (" + ls.getLeaseCount() + ", wanted " + wanted + ")");
            return null;
        } else {
            // linear search to trim down the leaseSet, removing the ones that
            // will expire the earliest.  cheaper than a tree for this size
            while (ls.getLeaseCount() > wanted) {
                int earliestIndex = -1;
                long earliestExpiration = -1;
                for (int i = 0; i < ls.getLeaseCount(); i++) {
                    Lease cur = ls.getLease(i);
                    if ( (earliestExpiration < 0) || (cur.getEndDate().getTime() < earliestExpiration) ) {
                        earliestIndex = i;
                        earliestExpiration = cur.getEndDate().getTime();
                    }
                }
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug(toString() + ": Dropping older lease from the leaseSet: " + earliestIndex + " out of " + ls.getLeaseCount());
                ls.removeLease(earliestIndex);
            }
            return ls;
        }
    }

    public long getLifetimeProcessed() { return _lifetimeProcessed; }

    public String toString() {
        if (_settings.isExploratory()) {
            if (_settings.isInbound())
                return "Inbound exploratory pool";
            else
                return "Outbound exploratory pool";
        } else {
            StringBuffer rv = new StringBuffer(32);
            if (_settings.isInbound())
                rv.append("Inbound client pool for ");
            else
                rv.append("Outbound client pool for ");
            if (_settings.getDestinationNickname() != null)
                rv.append(_settings.getDestinationNickname());
            else
                rv.append(_settings.getDestination().toBase64().substring(0,4));
            return rv.toString();
        }
    }
}

View File

@ -0,0 +1,398 @@
package net.i2p.router.tunnel.pool;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.TunnelCreateMessage;
import net.i2p.data.i2np.TunnelDataMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.stat.RateStat;
import net.i2p.router.ClientTunnelSettings;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelManagerFacade;
import net.i2p.router.TunnelPoolSettings;
import net.i2p.router.tunnel.HopConfig;
/**
 * Coordinate the router's tunnel pools: one exploratory pool per direction
 * (shared, not bound to any destination) plus an inbound/outbound pool pair
 * for each locally connected client destination, keyed by the destination's
 * hash.  This is the TunnelManagerFacade implementation the rest of the
 * router uses to select tunnels, adjust pool settings, and render status.
 *
 * Thread-safety: the two client pool maps are guarded by synchronizing on
 * the map instance itself; the exploratory pool references are null until
 * startup() runs (callers of the count accessors check for that).
 */
public class TunnelPoolManager implements TunnelManagerFacade {
    private RouterContext _context;
    /** Hash (destination) to TunnelPool */
    private Map _clientInboundPools;
    /** Hash (destination) to TunnelPool */
    private Map _clientOutboundPools;
    /** shared inbound pool; null until startup() is called */
    private TunnelPool _inboundExploratory;
    /** shared outbound pool; null until startup() is called */
    private TunnelPool _outboundExploratory;

    public TunnelPoolManager(RouterContext ctx) {
        _context = ctx;
        // handle tunnel create requests arriving from other routers
        HandlerJobBuilder builder = new HandleTunnelCreateMessageJob.Builder(ctx);
        ctx.inNetMessagePool().registerHandlerJobBuilder(TunnelCreateMessage.MESSAGE_TYPE, builder);
        //HandlerJobBuilder b = new TunnelMessageHandlerBuilder(ctx);
        //ctx.inNetMessagePool().registerHandlerJobBuilder(TunnelGatewayMessage.MESSAGE_TYPE, b);
        //ctx.inNetMessagePool().registerHandlerJobBuilder(TunnelDataMessage.MESSAGE_TYPE, b);
        _clientInboundPools = new HashMap(4);
        _clientOutboundPools = new HashMap(4);
        ctx.statManager().createRateStat("tunnel.testSuccessTime",
                        "How long do successful tunnel tests take?", "Tunnels",
                        new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        ctx.statManager().createRateStat("tunnel.participatingTunnels",
                        "How many tunnels are we participating in?", "Tunnels",
                        new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
    }

    /** pick an inbound tunnel not bound to a particular destination */
    public TunnelInfo selectInboundTunnel() {
        TunnelInfo info = _inboundExploratory.selectTunnel();
        if (info == null) {
            // nothing usable - synthesize a zero-hop fallback and retry
            _inboundExploratory.buildFake();
            // still can be null, but probably not
            info = _inboundExploratory.selectTunnel();
        }
        return info;
    }

    /**
     * pick an inbound tunnel bound to the given destination
     * (falls back to the exploratory pool when destination is null;
     * returns null when no pool exists for the destination)
     */
    public TunnelInfo selectInboundTunnel(Hash destination) {
        if (destination == null) return selectInboundTunnel();
        TunnelPool pool = null;
        synchronized (_clientInboundPools) {
            pool = (TunnelPool)_clientInboundPools.get(destination);
        }
        if (pool != null) {
            return pool.selectTunnel();
        }
        return null;
    }

    /** pick an outbound tunnel not bound to a particular destination */
    public TunnelInfo selectOutboundTunnel() {
        TunnelInfo info = _outboundExploratory.selectTunnel();
        if (info == null) {
            // nothing usable - synthesize a zero-hop fallback and retry
            _outboundExploratory.buildFake();
            // still can be null, but probably not
            info = _outboundExploratory.selectTunnel();
        }
        return info;
    }

    /**
     * pick an outbound tunnel bound to the given destination
     * (falls back to the exploratory pool when destination is null;
     * returns null when no pool exists for the destination)
     */
    public TunnelInfo selectOutboundTunnel(Hash destination) {
        if (destination == null) return selectOutboundTunnel();
        TunnelPool pool = null;
        synchronized (_clientOutboundPools) {
            pool = (TunnelPool)_clientOutboundPools.get(destination);
        }
        if (pool != null) {
            return pool.selectTunnel();
        }
        return null;
    }

    /**
     * Look up a tunnel by id.
     * NOTE(review): only the client *inbound* pools are searched here -
     * outbound and exploratory tunnels will never be found; confirm whether
     * that is intentional for this facade method.
     */
    public TunnelInfo getTunnelInfo(TunnelId id) {
        TunnelInfo info = null;
        synchronized (_clientInboundPools) {
            for (Iterator iter = _clientInboundPools.values().iterator(); iter.hasNext(); ) {
                TunnelPool pool = (TunnelPool)iter.next();
                info = pool.getTunnel(id);
                if (info != null)
                    return info;
            }
        }
        return null;
    }

    /** number of tunnels in the inbound exploratory pool (0 before startup) */
    public int getFreeTunnelCount() {
        if (_inboundExploratory == null)
            return 0;
        else
            return _inboundExploratory.size();
    }

    /** number of tunnels in the outbound exploratory pool (0 before startup) */
    public int getOutboundTunnelCount() {
        if (_outboundExploratory == null)
            return 0;
        else
            return _outboundExploratory.size();
    }

    public int getParticipatingCount() { return _context.tunnelDispatcher().getParticipatingCount(); }
    public long getLastParticipatingExpiration() { return _context.tunnelDispatcher().getLastParticipatingExpiration(); }

    public boolean isInUse(Hash peer) {
        // this lets peers that are in our tunnels expire (forcing us to refetch them)
        // if the info is old
        //!! no, dont. bad.
        return true;
    }

    public TunnelPoolSettings getInboundSettings() { return _inboundExploratory.getSettings(); }
    public TunnelPoolSettings getOutboundSettings() { return _outboundExploratory.getSettings(); }
    public void setInboundSettings(TunnelPoolSettings settings) { _inboundExploratory.setSettings(settings); }
    public void setOutboundSettings(TunnelPoolSettings settings) { _outboundExploratory.setSettings(settings); }

    /** settings for the client's inbound pool, or null if no such pool exists */
    public TunnelPoolSettings getInboundSettings(Hash client) {
        TunnelPool pool = null;
        synchronized (_clientInboundPools) {
            pool = (TunnelPool)_clientInboundPools.get(client);
        }
        if (pool != null)
            return pool.getSettings();
        else
            return null;
    }

    /** settings for the client's outbound pool, or null if no such pool exists */
    public TunnelPoolSettings getOutboundSettings(Hash client) {
        TunnelPool pool = null;
        synchronized (_clientOutboundPools) {
            pool = (TunnelPool)_clientOutboundPools.get(client);
        }
        if (pool != null)
            return pool.getSettings();
        else
            return null;
    }

    /** replace the client's inbound pool settings (no-op if the pool is gone) */
    public void setInboundSettings(Hash client, TunnelPoolSettings settings) {
        TunnelPool pool = null;
        synchronized (_clientInboundPools) {
            pool = (TunnelPool)_clientInboundPools.get(client);
        }
        if (pool != null)
            pool.setSettings(settings);
    }

    /** replace the client's outbound pool settings (no-op if the pool is gone) */
    public void setOutboundSettings(Hash client, TunnelPoolSettings settings) {
        TunnelPool pool = null;
        synchronized (_clientOutboundPools) {
            pool = (TunnelPool)_clientOutboundPools.get(client);
        }
        if (pool != null)
            pool.setSettings(settings);
    }

    public void restart() {
        shutdown();
        startup();
    }

    /**
     * Create (or reconfigure) and start the inbound/outbound pool pair for
     * the given client destination.  Re-running for a known destination
     * keeps the existing pools and just swaps in the new settings.
     */
    public void buildTunnels(Destination client, ClientTunnelSettings settings) {
        Hash dest = client.calculateHash();
        settings.getInboundSettings().setDestination(dest);
        settings.getOutboundSettings().setDestination(dest);
        TunnelPool inbound = null;
        TunnelPool outbound = null;
        // should we share the clientPeerSelector across both inbound and outbound?
        synchronized (_clientInboundPools) {
            inbound = (TunnelPool)_clientInboundPools.get(dest);
            if (inbound == null) {
                inbound = new TunnelPool(_context, this, settings.getInboundSettings(),
                                         new ClientPeerSelector(), new TunnelBuilder());
                _clientInboundPools.put(dest, inbound);
            } else {
                inbound.setSettings(settings.getInboundSettings());
            }
        }
        synchronized (_clientOutboundPools) {
            outbound = (TunnelPool)_clientOutboundPools.get(dest);
            if (outbound == null) {
                outbound = new TunnelPool(_context, this, settings.getOutboundSettings(),
                                          new ClientPeerSelector(), new TunnelBuilder());
                _clientOutboundPools.put(dest, outbound);
            } else {
                outbound.setSettings(settings.getOutboundSettings());
            }
        }
        // startup outside the locks
        inbound.startup();
        outbound.startup();
    }

    /** drop and shut down both pools for the destination (no-op when null/unknown) */
    public void removeTunnels(Hash destination) {
        if (destination == null) return;
        TunnelPool inbound = null;
        TunnelPool outbound = null;
        synchronized (_clientInboundPools) {
            inbound = (TunnelPool)_clientInboundPools.remove(destination);
        }
        synchronized (_clientOutboundPools) {
            outbound = (TunnelPool)_clientOutboundPools.remove(destination);
        }
        if (inbound != null)
            inbound.shutdown();
        if (outbound != null)
            outbound.shutdown();
    }

    /**
     * Build and start the two exploratory pools, then queue jobs to
     * bootstrap them with real (non-fake) tunnels shortly after startup.
     */
    public void startup() {
        TunnelBuilder builder = new TunnelBuilder();
        ExploratoryPeerSelector selector = new ExploratoryPeerSelector();
        TunnelPoolSettings inboundSettings = new TunnelPoolSettings();
        inboundSettings.setIsExploratory(true);
        inboundSettings.setIsInbound(true);
        _inboundExploratory = new TunnelPool(_context, this, inboundSettings, selector, builder);
        _inboundExploratory.startup();
        TunnelPoolSettings outboundSettings = new TunnelPoolSettings();
        outboundSettings.setIsExploratory(true);
        outboundSettings.setIsInbound(false);
        _outboundExploratory = new TunnelPool(_context, this, outboundSettings, selector, builder);
        _outboundExploratory.startup();
        // try to build up longer tunnels
        _context.jobQueue().addJob(new BootstrapPool(_context, _inboundExploratory));
        _context.jobQueue().addJob(new BootstrapPool(_context, _outboundExploratory));
    }

    /** one-shot job that kicks a pool to build a non-fake tunnel 30s after startup */
    private class BootstrapPool extends JobImpl {
        private TunnelPool _pool;
        public BootstrapPool(RouterContext ctx, TunnelPool pool) {
            super(ctx);
            _pool = pool;
            getTiming().setStartAfter(ctx.clock().now() + 30*1000);
        }
        public String getName() { return "Bootstrap tunnel pool"; }
        public void runJob() {
            _pool.buildFake(false);
        }
    }

    public void shutdown() {
        if (_inboundExploratory != null)
            _inboundExploratory.shutdown();
        if (_outboundExploratory != null)
            _outboundExploratory.shutdown();
    }

    /**
     * Write an HTML status report: exploratory pools, each client's pools,
     * and the table of tunnels we merely participate in.
     */
    public void renderStatusHTML(Writer out) throws IOException {
        out.write("<h2><a name=\"exploratory\">Exploratory tunnels</a> (<a href=\"/configtunnels.jsp#exploratory\">config</a>):</h2>\n");
        renderPool(out, _inboundExploratory, _outboundExploratory);
        // snapshot the destinations so we don't iterate while holding the lock
        List destinations = null;
        synchronized (_clientInboundPools) {
            destinations = new ArrayList(_clientInboundPools.keySet());
        }
        for (int i = 0; i < destinations.size(); i++) {
            Hash client = (Hash)destinations.get(i);
            TunnelPool in = null;
            TunnelPool outPool = null;
            synchronized (_clientInboundPools) {
                in = (TunnelPool)_clientInboundPools.get(client);
            }
            synchronized (_clientOutboundPools) {
                outPool = (TunnelPool)_clientOutboundPools.get(client);
            }
            // prefer a nickname from either pool, else a short dest prefix
            String name = (in != null ? in.getSettings().getDestinationNickname() : null);
            if ( (name == null) && (outPool != null) )
                name = outPool.getSettings().getDestinationNickname();
            if (name == null)
                name = client.toBase64().substring(0,4);
            out.write("<h2><a name=\"" + client.toBase64().substring(0,4)
                      + "\">Client tunnels</a> for " + name + " (<a href=\"/configtunnels.jsp#"
                      + client.toBase64().substring(0,4) +"\">config</a>):</h2>\n");
            renderPool(out, in, outPool);
        }
        List participating = _context.tunnelDispatcher().listParticipatingTunnels();
        out.write("<h2><a name=\"participating\">Participating tunnels</a>:</h2><table border=\"1\">\n");
        out.write("<tr><td><b>Receive on</b></td><td><b>From</b></td><td>"
                  + "<b>Send on</b></td><td><b>To</b></td><td><b>Expiration</b></td>"
                  + "<td><b>Usage</b></td></tr>\n");
        long processed = 0;
        RateStat rs = _context.statManager().getRate("tunnel.participatingMessageCount");
        if (rs != null)
            processed = (long)rs.getRate(10*60*1000).getLifetimeTotalValue();
        for (int i = 0; i < participating.size(); i++) {
            HopConfig cfg = (HopConfig)participating.get(i);
            out.write("<tr>");
            if (cfg.getReceiveTunnel() != null)
                out.write("<td>" + cfg.getReceiveTunnel().getTunnelId() +"</td>");
            else
                out.write("<td>n/a</td>");
            if (cfg.getReceiveFrom() != null)
                out.write("<td>" + cfg.getReceiveFrom().toBase64().substring(0,4) +"</td>");
            else
                out.write("<td>n/a</td>");
            if (cfg.getSendTunnel() != null)
                out.write("<td>" + cfg.getSendTunnel().getTunnelId() +"</td>");
            else
                out.write("<td>n/a</td>");
            if (cfg.getSendTo() != null)
                out.write("<td>" + cfg.getSendTo().toBase64().substring(0,4) +"</td>");
            else
                out.write("<td>n/a</td>");
            long timeLeft = cfg.getExpiration()-_context.clock().now();
            if (timeLeft > 0)
                out.write("<td>" + DataHelper.formatDuration(timeLeft) + "</td>");
            else
                out.write("<td>(grace period)</td>");
            out.write("<td>" + cfg.getProcessedMessagesCount() + "KB</td>");
            out.write("</tr>\n");
            processed += cfg.getProcessedMessagesCount();
        }
        out.write("</table>\n");
        out.write("Lifetime bandwidth usage: " + processed + "KB<br />\n");
    }

    /**
     * Render one table covering an inbound/outbound pool pair; tunnels past
     * their expiration (grace period) are skipped.  Either pool may be null.
     */
    private void renderPool(Writer out, TunnelPool in, TunnelPool outPool) throws IOException {
        List tunnels = null;
        if (in == null)
            tunnels = new ArrayList();
        else
            tunnels = in.listTunnels();
        if (outPool != null)
            tunnels.addAll(outPool.listTunnels());
        long processedIn = (in != null ? in.getLifetimeProcessed() : 0);
        long processedOut = (outPool != null ? outPool.getLifetimeProcessed() : 0);
        out.write("<table border=\"1\"><tr><td><b>Direction</b></td><td><b>Expiration</b></td><td><b>Usage</b></td><td align=\"left\">Hops (gateway first)</td></tr>\n");
        int live = 0;
        for (int i = 0; i < tunnels.size(); i++) {
            TunnelInfo info = (TunnelInfo)tunnels.get(i);
            long timeLeft = info.getExpiration()-_context.clock().now();
            if (timeLeft <= 0)
                continue; // don't display tunnels in their grace period
            live++;
            if (info.isInbound())
                out.write("<tr><td><b>inbound</b></td>");
            else
                out.write("<tr><td><b>outbound</b></td>");
            out.write("<td>" + DataHelper.formatDuration(timeLeft) + "</td>\n");
            out.write("<td>" + info.getProcessedMessagesCount() + "KB</td>\n");
            for (int j = 0; j < info.getLength(); j++) {
                Hash peer = info.getPeer(j);
                TunnelId id = (info.isInbound() ? info.getReceiveTunnelId(j) : info.getSendTunnelId(j));
                // italicize our own router in the hop list
                if (_context.routerHash().equals(peer))
                    out.write("<td><i>" + peer.toBase64().substring(0,4) + (id == null ? "" : ":" + id) + "</i></td>");
                else
                    out.write("<td>" + peer.toBase64().substring(0,4) + (id == null ? "" : ":" + id) + "</td>");
            }
            out.write("</tr>\n");
            if (info.isInbound())
                processedIn += info.getProcessedMessagesCount();
            else
                processedOut += info.getProcessedMessagesCount();
        }
        if (live <= 0)
            out.write("<tr><td colspan=\"3\">No tunnels, waiting for the grace period to end</td></tr>\n");
        out.write("</table>\n");
        out.write("Lifetime bandwidth usage: " + processedIn + "KB in, " + processedOut + "KB out<br />");
    }
}

View File

@ -1,233 +0,0 @@
package net.i2p.router.tunnelmanager;
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeMap;
import net.i2p.data.Lease;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
 * Manage the process of requesting a lease set as necessary for a client based
 * on the contents of the tunnel pool. Request a new lease set when:
 *  - # safe inbound tunnels meets or exceeds the client's minimum and
 *    - no current leaseSet exists
 *    or
 *    - one of the tunnels in the current leaseSet has expired
 *    or
 *    - it has been N minutes since the current leaseSet was created
 *      (where N is based off the clientSettings.getInboundDuration)
 *
 * The job requeues itself every RECHECK_DELAY ms while the pool is alive.
 */
class ClientLeaseSetManagerJob extends JobImpl {
    private Log _log;
    /** pool whose inbound tunnels back the leases we publish */
    private ClientTunnelPool _pool;
    /** last leaseSet confirmed created for the client (null until then) */
    private LeaseSet _currentLeaseSet;
    /** clock time when _currentLeaseSet was confirmed, or -1 */
    private long _lastCreated;
    /** when true, the next run unconditionally requests a fresh leaseSet */
    private boolean _forceRequestLease;

    /**
     * Recheck the set every 15 seconds
     * todo: this should probably be updated dynamically based on expiration dates / etc.
     *
     */
    private final static long RECHECK_DELAY = 15*1000;

    /**
     * How long to wait for the client to approve or reject a leaseSet
     */
    private final static long REQUEST_LEASE_TIMEOUT = 30*1000;

    public ClientLeaseSetManagerJob(RouterContext context, ClientTunnelPool pool) {
        super(context);
        _log = context.logManager().getLog(ClientLeaseSetManagerJob.class);
        _pool = pool;
        _currentLeaseSet = null;
        _lastCreated = -1;
        context.statManager().createRateStat("client.leaseSetExpired", "How long ago did our leaseSet expire?", "ClientMessages", new long[] { 60*60*1000l, 24*60*60*1000l });
    }

    /** request a new lease on the next run regardless of the current one (e.g. client reconnect) */
    public void forceRequestLease() {
        _currentLeaseSet = null;
        _forceRequestLease = true;
    }

    public String getName() { return "Manage Client Lease Set"; }

    public void runJob() {
        // stop entirely only when the client is gone AND no tunnels remain
        if ((!_forceRequestLease) && (_pool.isStopped()) ) {
            if ( (_pool.getInactiveInboundTunnelIds().size() <= 0) &&
                 (_pool.getInboundTunnelIds().size() <= 0) ) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("No more tunnels and the client has stopped, so no need to manage the leaseSet any more for "
                              + _pool.getDestination().calculateHash());
                return;
            } else {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Client " + _pool.getDestination().calculateHash()
                              + " is stopped, but they still have some tunnels, so don't stop maintaining the leaseSet");
                requeue(RECHECK_DELAY);
                return;
            }
        }
        int available = _pool.getSafePoolSize();
        if (available >= _pool.getClientSettings().getNumInboundTunnels()) {
            // enough safe tunnels - decide whether a new leaseSet is warranted
            if (_forceRequestLease) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Forced to request a new lease (reconnected client perhaps?)");
                _forceRequestLease = false;
                requestNewLeaseSet();
            } else if (_currentLeaseSet == null) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("No leaseSet is known - request a new one");
                requestNewLeaseSet();
            } else if (tunnelsChanged()) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Tunnels changed from the old leaseSet - request a new one: [pool = "
                              + _pool.getInboundTunnelIds() + " old leaseSet: " + _currentLeaseSet);
                requestNewLeaseSet();
            } else if (getContext().clock().now() > _lastCreated + _pool.getClientSettings().getInboundDuration()) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("We've exceeded the client's requested duration (limit = "
                              + new Date(_lastCreated + _pool.getClientSettings().getInboundDuration())
                              + " / " + _pool.getClientSettings().getInboundDuration()
                              + ") - request a new leaseSet");
                requestNewLeaseSet();
            } else {
                _log.debug("The current LeaseSet is fine, noop");
            }
        } else {
            _log.warn("Insufficient safe inbound tunnels exist for the client (" + available
                      + " available, " + _pool.getClientSettings().getNumInboundTunnels()
                      + " required) - no leaseSet requested");
            if (_currentLeaseSet != null) {
                long howOld = getContext().clock().now() - _currentLeaseSet.getEarliestLeaseDate();
                if (howOld > 0) {
                    // expired
                    getContext().statManager().addRateData("client.leaseSetExpired", howOld, 0);
                }
            }
        }
        requeue(RECHECK_DELAY);
    }

    /**
     * Determine if the tunnels in the current leaseSet are the same as the
     * currently available free tunnels
     *
     * @return true if the tunnels are /not/ the same, else false if they are the same
     */
    private boolean tunnelsChanged() {
        long furthestInFuture = 0;
        Set currentIds = new HashSet(_currentLeaseSet.getLeaseCount());
        for (int i = 0; i < _currentLeaseSet.getLeaseCount(); i++) {
            Lease lease = (Lease)_currentLeaseSet.getLease(i);
            currentIds.add(lease.getTunnelId());
            if (lease.getEndDate().getTime() > furthestInFuture)
                furthestInFuture = lease.getEndDate().getTime();
        }
        // tunnels available in the pool but not yet published
        Set avail = _pool.getInboundTunnelIds();
        avail.removeAll(currentIds);
        // check to see if newer ones exist in the available pool
        for (Iterator iter = avail.iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getInboundTunnel(id);
            // we need to check this in case the tunnel was deleted since 6 lines up
            if ( (id != null) && (info != null) && (info.getSettings() != null) ) {
                // if something available but not in the currently published lease will be
                // around longer than any of the published leases, we want that tunnel to
                // be added to our published lease
                if (info.getSettings().getExpiration() > furthestInFuture) {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("Tunnel " + id.getTunnelId() + " expires "
                                   + (info.getSettings().getExpiration()-furthestInFuture)
                                   + "ms after any of the existing ones do");
                    return true;
                }
            }
        }
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("None of the available tunnels expire after the existing lease set's tunnels");
        return false;
    }

    /**
     * Request a new leaseSet based off the currently available safe tunnels
     */
    private void requestNewLeaseSet() {
        LeaseSet proposed = buildNewLeaseSet();
        // LeaseSetCreatedJob fires on client approval; no explicit timeout job
        getContext().clientManager().requestLeaseSet(_pool.getDestination(), proposed,
                                                     REQUEST_LEASE_TIMEOUT, new LeaseSetCreatedJob(getContext()),
                                                     null);
    }

    /**
     * Create a new proposed leaseSet with all inbound tunnels
     */
    private LeaseSet buildNewLeaseSet() {
        LeaseSet ls = new LeaseSet();
        // sorted by negated expiration so iteration yields longest-lived first
        TreeMap tunnels = new TreeMap();
        long now = getContext().clock().now();
        for (Iterator iter = _pool.getInboundTunnelIds().iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getInboundTunnel(id);
            if (!info.getIsReady())
                continue;
            long exp = info.getSettings().getExpiration();
            // skip tunnels that would expire before the next recheck + approval window
            if (now + RECHECK_DELAY + REQUEST_LEASE_TIMEOUT > exp)
                continue;
            RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(info.getThisHop());
            if (ri == null)
                continue;
            Lease lease = new Lease();
            lease.setEndDate(new Date(exp));
            lease.setRouterIdentity(ri.getIdentity());
            lease.setTunnelId(id);
            tunnels.put(new Long(0-exp), lease);
        }
        // now pick the N tunnels with the longest time remaining (n = # tunnels the client requested)
        // place tunnels.size() - N into the inactive pool
        int selected = 0;
        int wanted = _pool.getClientSettings().getNumInboundTunnels();
        for (Iterator iter = tunnels.values().iterator(); iter.hasNext(); ) {
            Lease lease = (Lease)iter.next();
            if (selected < wanted) {
                ls.addLease(lease);
                selected++;
            } else {
                _pool.moveToInactive(lease.getTunnelId());
            }
        }
        ls.setDestination(_pool.getDestination());
        return ls;
    }

    /** callback run once the client approves the proposed leaseSet */
    private class LeaseSetCreatedJob extends JobImpl {
        public LeaseSetCreatedJob(RouterContext enclosingContext) {
            super(enclosingContext);
        }
        public String getName() { return "LeaseSet created"; }
        public void runJob() {
            RouterContext ctx = ClientLeaseSetManagerJob.this.getContext();
            // confirm it actually landed in the netDb before accepting it
            LeaseSet ls = ctx.netDb().lookupLeaseSetLocally(_pool.getDestination().calculateHash());
            if (ls != null) {
                _log.info("New leaseSet completely created");
                _lastCreated = ctx.clock().now();
                _currentLeaseSet = ls;
            } else {
                _log.error("New lease set created, but not found locally? wtf?! client="
                           + _pool.getDestination().calculateHash().toBase64());
            }
        }
    }
}

View File

@ -1,210 +0,0 @@
package net.i2p.router.tunnelmanager;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import net.i2p.data.Destination;
import net.i2p.data.TunnelId;
import net.i2p.router.ClientTunnelSettings;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
 * Per-destination holder for a client's inbound tunnels, split into an
 * active set (usable for leases/replies) and an inactive set (no longer
 * published but not yet expired).  Starting the pool schedules the three
 * maintenance jobs (pool manager, leaseSet manager, expiration).
 *
 * Thread-safety: each tunnel map is guarded by synchronizing on the map
 * instance itself; accessors return defensive copies of the key sets.
 */
class ClientTunnelPool {
    private Log _log;
    private Destination _dest;
    private ClientTunnelSettings _settings;
    /** the router-wide pool this client pool draws from / reports to */
    private TunnelPool _pool;
    private Map _inboundTunnels; // TunnelId --> TunnelInfo for inbound tunnels
    private Map _inactiveInboundTunnels; // TunnelId --> TunnelInfo for inbound tunnels no longer in use (but not expired)
    private ClientTunnelPoolManagerJob _mgrJob;
    private ClientLeaseSetManagerJob _leaseMgrJob;
    private ClientTunnelPoolExpirationJob _tunnelExpirationJob;
    private boolean _isStopped;
    /** monotonically increasing id source, for log readability only */
    private static int __poolId;
    private int _poolId;
    private RouterContext _context;

    public ClientTunnelPool(RouterContext ctx, Destination dest, ClientTunnelSettings settings,
                            TunnelPool pool) {
        _context = ctx;
        _log = ctx.logManager().getLog(ClientTunnelPool.class);
        _dest = dest;
        _settings = settings;
        _pool = pool;
        _inboundTunnels = new HashMap();
        _inactiveInboundTunnels = new HashMap();
        // created stopped; startPool() activates it
        _isStopped = true;
        _poolId = ++__poolId;
    }

    /**
     * Mark the pool running and (re)schedule the three maintenance jobs.
     * Safe to call on an already-running pool: existing jobs are requeued
     * to fire immediately, and the lease manager is told to re-request.
     */
    public void startPool() {
        //if (!_isStopped) {
        //    if (_log.shouldLog(Log.ERROR))
        //        _log.error("Pool " + _poolId +": Not starting the pool /again/ (its already running)");
        //    return;
        //} else {
            if (_log.shouldLog(Log.INFO))
                _log.info("Pool " + _poolId +": Starting up the pool ");
        //}
        _isStopped = false;
        if (_mgrJob == null) {
            _mgrJob = new ClientTunnelPoolManagerJob(_context, _pool, this);
            _context.jobQueue().addJob(_mgrJob);
        } else {
            _mgrJob.getTiming().setStartAfter(_context.clock().now());
            _context.jobQueue().addJob(_mgrJob);
        }
        if (_leaseMgrJob == null) {
            _leaseMgrJob = new ClientLeaseSetManagerJob(_context, this);
            _context.jobQueue().addJob(_leaseMgrJob);
        } else {
            // we just restarted, so make sure we ask for a new leaseSet ASAP
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("restarting the client pool and requesting a new leaseSet");
            _leaseMgrJob.forceRequestLease();
            _leaseMgrJob.getTiming().setStartAfter(_context.clock().now());
            _context.jobQueue().addJob(_leaseMgrJob);
        }
        if (_tunnelExpirationJob == null) {
            _tunnelExpirationJob = new ClientTunnelPoolExpirationJob(_context, this, _pool);
            _context.jobQueue().addJob(_tunnelExpirationJob);
        } else {
            _tunnelExpirationJob.getTiming().setStartAfter(_context.clock().now());
            _context.jobQueue().addJob(_tunnelExpirationJob);
        }
    }

    /** flag the pool stopped; the maintenance jobs notice and wind down on their own */
    public void stopPool() { _isStopped = true; }
    public boolean isStopped() { return _isStopped; }

    /** non-null settings imply the client (re)connected, so restart the pool */
    public void setClientSettings(ClientTunnelSettings settings) {
        _settings = settings;
        if (settings != null) {
            _log.info("Client settings specified - the client may have reconnected, so restart the pool");
            startPool();
        }
    }
    public ClientTunnelSettings getClientSettings() { return _settings; }

    public Destination getDestination() { return _dest; }

    /** demote an active inbound tunnel to the inactive set (still alive, just unpublished) */
    public void moveToInactive(TunnelId id) {
        TunnelInfo info = removeInboundTunnel(id);
        if (info != null) {
            _context.messageHistory().tunnelJoined("inactive inbound", info);
            synchronized (_inactiveInboundTunnels) {
                _inactiveInboundTunnels.put(id, info);
            }
            _log.info("Marking tunnel " + id + " as inactive");
        }
    }

    /** bulk-load active tunnels (e.g. on restore); additive, does not clear existing */
    void setActiveTunnels(Set activeTunnels) {
        for (Iterator iter = activeTunnels.iterator(); iter.hasNext(); ) {
            TunnelInfo info = (TunnelInfo)iter.next();
            _context.messageHistory().tunnelJoined("active inbound", info);
            synchronized (_inboundTunnels) {
                _inboundTunnels.put(info.getTunnelId(), info);
            }
        }
    }
    /** bulk-load inactive tunnels; additive, does not clear existing */
    void setInactiveTunnels(Set inactiveTunnels) {
        for (Iterator iter = inactiveTunnels.iterator(); iter.hasNext(); ) {
            TunnelInfo info = (TunnelInfo)iter.next();
            _context.messageHistory().tunnelJoined("inactive inbound", info);
            synchronized (_inactiveInboundTunnels) {
                _inactiveInboundTunnels.put(info.getTunnelId(), info);
            }
        }
    }

    /**
     * Go through all of the client's inbound tunnels and determine how many are safe for
     * use over the next period, either as part of a LeaseSet or as the target for a reply / etc.
     *
     */
    public int getSafePoolSize() {
        return getSafePoolSize(2*60*1000);
    }
    /**
     * Get the safe # pools at some point in the future
     *
     * @param futureMs number of milliseconds in the future that we want to check safety for
     */
    public int getSafePoolSize(long futureMs) {
        int numSafe = 0;
        long expireAfter = _context.clock().now() + futureMs;
        for (Iterator iter = getInboundTunnelIds().iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = getInboundTunnel(id);
            // ready and not expiring within the window
            if ( (info != null) && (info.getIsReady()) && (info.getSettings().getExpiration() > expireAfter) )
                numSafe++;
        }
        return numSafe;
    }

    /**
     * Set of tunnelIds of inbound tunnels
     *
     */
    public Set getInboundTunnelIds() {
        synchronized (_inboundTunnels) {
            return new HashSet(_inboundTunnels.keySet());
        }
    }
    public boolean isInboundTunnel(TunnelId id) {
        synchronized (_inboundTunnels) {
            return _inboundTunnels.containsKey(id);
        }
    }
    public TunnelInfo getInboundTunnel(TunnelId id) {
        synchronized (_inboundTunnels) {
            return (TunnelInfo)_inboundTunnels.get(id);
        }
    }
    public void addInboundTunnel(TunnelInfo tunnel) {
        _context.messageHistory().tunnelJoined("active inbound", tunnel);
        synchronized (_inboundTunnels) {
            _inboundTunnels.put(tunnel.getTunnelId(), tunnel);
        }
    }
    /** remove an active tunnel, recording its stats with the parent pool; null if absent */
    public TunnelInfo removeInboundTunnel(TunnelId id) {
        TunnelInfo info = null;
        synchronized (_inboundTunnels) {
            info = (TunnelInfo)_inboundTunnels.remove(id);
        }
        _pool.addTunnelStats(id, info);
        return info;
    }

    public Set getInactiveInboundTunnelIds() {
        synchronized (_inactiveInboundTunnels) {
            return new HashSet(_inactiveInboundTunnels.keySet());
        }
    }
    public boolean isInactiveInboundTunnel(TunnelId id) {
        synchronized (_inactiveInboundTunnels) {
            return _inactiveInboundTunnels.containsKey(id);
        }
    }
    public TunnelInfo getInactiveInboundTunnel(TunnelId id) {
        synchronized (_inactiveInboundTunnels) {
            return (TunnelInfo)_inactiveInboundTunnels.get(id);
        }
    }
    /** remove an inactive tunnel, recording its stats with the parent pool; null if absent */
    public TunnelInfo removeInactiveInboundTunnel(TunnelId id) {
        TunnelInfo info = null;
        synchronized (_inactiveInboundTunnels) {
            info = (TunnelInfo)_inactiveInboundTunnels.remove(id);
        }
        _pool.addTunnelStats(id, info);
        return info;
    }
}

View File

@ -1,116 +0,0 @@
package net.i2p.router.tunnelmanager;
import java.util.Date;
import java.util.Iterator;
import net.i2p.data.TunnelId;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
 * Periodically go through all of the tunnels assigned to this client and mark
 * them as no longer ready and/or drop them (as appropriate).
 *
 * Runs every EXPIRE_POOL_DELAY ms while the client pool is alive; once the
 * pool is stopped AND empty, it unregisters the client pool and stops
 * requeueing itself.
 */
class ClientTunnelPoolExpirationJob extends JobImpl {
    private Log _log;
    /** the client pool whose tunnels we expire */
    private ClientTunnelPool _pool;
    /** router-wide pool, used only to deregister the client pool when done */
    private TunnelPool _tunnelPool;

    /** expire tunnels as necessary every 30 seconds */
    private final static long EXPIRE_POOL_DELAY = 30*1000;

    /**
     * don't hard expire a tunnel until its later than expiration + buffer
     */
    private final static long EXPIRE_BUFFER = 30*1000;

    public ClientTunnelPoolExpirationJob(RouterContext context, ClientTunnelPool pool, TunnelPool tunnelPool) {
        super(context);
        _log = context.logManager().getLog(ClientTunnelPoolExpirationJob.class);
        _pool = pool;
        _tunnelPool = tunnelPool;
        getTiming().setStartAfter(getContext().clock().now() + EXPIRE_POOL_DELAY);
    }

    public String getName() { return "Expire Pooled Client Tunnels"; }

    public void runJob() {
        if (_pool.isStopped()) {
            if ( (_pool.getInactiveInboundTunnelIds().size() <= 0) &&
                 (_pool.getInboundTunnelIds().size() <= 0) ) {
                // this may get called twice - once here, and once by the ClientTunnelPoolManagerJob
                // but its safe to do, and passing around messages would be overkill.
                _tunnelPool.removeClientPool(_pool.getDestination());
                if (_log.shouldLog(Log.INFO))
                    _log.info("No more tunnels to expire in the client tunnel pool for the stopped client " + _pool.getDestination().calculateHash());
                return; // no requeue - the job ends here
            } else {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Client " + _pool.getDestination().calculateHash()
                              + " is stopped, but they still have some tunnels, so don't stop expiring");
            }
        }
        expireInactiveTunnels();
        expireActiveTunnels();
        requeue(EXPIRE_POOL_DELAY);
    }

    /**
     * Drop all inactive tunnels that are expired or are close enough to
     * being expired that using them would suck.
     *
     */
    public void expireInactiveTunnels() {
        long now = getContext().clock().now();
        // hard-drop threshold: expiration + buffer + clock fudge both ways
        long expire = now - EXPIRE_BUFFER - 2*Router.CLOCK_FUDGE_FACTOR;
        for (Iterator iter = _pool.getInactiveInboundTunnelIds().iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getInactiveInboundTunnel(id);
            if ( (info != null) && (info.getSettings() != null) ) {
                if (info.getSettings().getExpiration() < expire) {
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Expiring inactive tunnel " + id + " ["
                                  + new Date(info.getSettings().getExpiration()) + "]");
                    _pool.removeInactiveInboundTunnel(id);
                } else if (info.getSettings().getExpiration() < now) {
                    // fix: guard the log call like expireActiveTunnels does, avoiding
                    // the string concatenation cost when INFO logging is disabled
                    if (_log.shouldLog(Log.INFO))
                        _log.info("It is past the expiration for inactive tunnel " + id
                                  + " but not yet the buffer, mark it as no longer ready");
                    info.setIsReady(false);
                }
            }
        }
    }

    /**
     * Drop all active tunnels that are expired or are close enough to
     * being expired that using them would suck.
     *
     */
    public void expireActiveTunnels() {
        long now = getContext().clock().now();
        // hard-drop threshold: expiration + buffer + clock fudge both ways
        long expire = now - EXPIRE_BUFFER - 2*Router.CLOCK_FUDGE_FACTOR;
        for (Iterator iter = _pool.getInboundTunnelIds().iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getInboundTunnel(id);
            if ( (info != null) && (info.getSettings() != null) ) {
                if (info.getSettings().getExpiration() < expire) {
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Expiring active tunnel " + id + " ["
                                  + new Date(info.getSettings().getExpiration()) + "]");
                    _pool.removeInboundTunnel(id);
                } else if (info.getSettings().getExpiration() < now) {
                    if (_log.shouldLog(Log.INFO))
                        _log.info("It is past the expiration for active tunnel " + id
                                  + " but not yet the buffer, mark it as no longer ready");
                    info.setIsReady(false);
                }
            }
        }
    }
}

View File

@ -1,265 +0,0 @@
package net.i2p.router.tunnelmanager;
import java.util.ArrayList;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;
import net.i2p.data.TunnelId;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
* refill the client tunnel pool as necessary, either from the TunnelPool's free
* inbound set or by requesting custom tunnels via the RequestInboundTunnelJob.
*
*/
/**
 * Refill the client tunnel pool as necessary, either by allocating tunnels
 * from the TunnelPool's free inbound set or by requesting custom tunnels
 * built to the client's settings.
 *
 * Requeues itself every POOL_CHECK_DELAY while the client remains connected
 * (or until the pool is torn down).
 */
class ClientTunnelPoolManagerJob extends JobImpl {
    private Log _log;
    /** the pool of tunnels allocated to this particular client */
    private ClientTunnelPool _clientPool;
    /** the router-wide pool we draw free tunnels from */
    private TunnelPool _tunnelPool;
    private TunnelBuilder _tunnelBuilder;
    /** check the pool every 30 seconds to make sure it has enough tunnels */
    private final static long POOL_CHECK_DELAY = 30*1000;

    public ClientTunnelPoolManagerJob(RouterContext ctx, TunnelPool pool, ClientTunnelPool clientPool) {
        super(ctx);
        _log = ctx.logManager().getLog(ClientTunnelPoolManagerJob.class);
        _clientPool = clientPool;
        _tunnelPool = pool;
        _tunnelBuilder = new TunnelBuilder(ctx);
    }

    public String getName() { return "Manage Client Tunnel Pool"; }

    public void runJob() {
        try {
            if (_clientPool.isStopped()) {
                // stopped pools manage their own requeue / teardown
                handleStopped();
                return;
            }
            if (!getContext().clientManager().isLocal(_clientPool.getDestination())) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Client " + _clientPool.getDestination().calculateHash()
                              + " is no longer connected, stop the pool");
                _clientPool.stopPool();
                requeue(POOL_CHECK_DELAY);
                return;
            }
            // only count tunnels that will still be alive long enough to be useful
            int requestedPoolSize = _clientPool.getClientSettings().getNumInboundTunnels();
            int safePoolSize = _clientPool.getSafePoolSize(2*60*1000 + POOL_CHECK_DELAY);
            if (safePoolSize < requestedPoolSize) {
                requestMoreTunnels(requestedPoolSize-safePoolSize);
            }
        } catch (Exception t) {
            // don't let an unexpected error kill the periodic management job
            _log.log(Log.CRIT, "Unhandled exception managing the client tunnel pool", t);
        }
        requeue(POOL_CHECK_DELAY);
    }

    /**
     * The pool is stopped, so lets see if we should keep doing anything
     */
    private void handleStopped() {
        if (getContext().clientManager().isLocal(_clientPool.getDestination())) {
            // it was stopped, but they've reconnected, so boot 'er up again
            if (_log.shouldLog(Log.INFO))
                _log.info("Client " + _clientPool.getDestination().calculateHash().toBase64()
                          + " was stopped, but reconnected! restarting it");
            _clientPool.startPool();
            // we return directly, since it'll queue up jobs again, etc
        } else {
            // not currently connected - check to see whether all of the tunnels have expired
            if ((_clientPool.getInactiveInboundTunnelIds().size() > 0) ||
                (_clientPool.getInboundTunnelIds().size() > 0) ) {
                // there are tunnels left, requeue until later (in case the client reconnects)
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("There are tunnels left, though the client is still disconnected: "
                               + _clientPool.getDestination().calculateHash());
                requeue(POOL_CHECK_DELAY);
            } else {
                // no tunnels left and the client is still disconnected, drop the pool
                if (_log.shouldLog(Log.INFO))
                    _log.info("No more tunnels left and the client has disconnected: "
                              + _clientPool.getDestination().calculateHash());
                _tunnelPool.removeClientPool(_clientPool.getDestination());
            }
        }
    }

    /**
     * Request num more inbound tunnels - either from the free pool or custom built ones
     *
     */
    private void requestMoreTunnels(int numTunnels) {
        if (_clientPool.getClientSettings().getDepthInbound() < 1) {
            // the client wants 0-hop tunnels, so don't waste longer tunnels on them
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("0 hop tunnels wanted - create custom ones");
            requestCustomTunnels(numTunnels);
            return;
        }
        int allocated = allocateExisting(numTunnels);
        if (allocated < numTunnels) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("insufficient tunnels available (wanted: " + numTunnels
                           + ", allocated: " + allocated + "), requesting custom ones");
            requestCustomTunnels(numTunnels - allocated);
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Sufficient tunnels exist in the client pool for "
                           + _clientPool.getDestination().calculateHash() + " w3wt");
            // done! w00t
        }
    }

    /**
     * Grab any existing tunnels that can be allocated to the client and do so
     *
     * @return number of tunnels allocated
     */
    private int allocateExisting(int numTunnels) {
        int allocated = 0;
        // good enough tunnels, ordered with the longest-from-now duration first
        Iterator iter = selectGoodEnough(numTunnels).iterator();
        while (iter.hasNext() && allocated < numTunnels) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _tunnelPool.getTunnelInfo(id);
            if (info == null) {
                // expired or removed from the pool since we selected it
                continue;
            }
            if (info.getLength() < _clientPool.getClientSettings().getDepthInbound()) {
                // this aint good 'nuff...
                continue;
            }
            boolean ok = _tunnelPool.allocateTunnel(id, _clientPool);
            if (ok) {
                allocated++;
            }
        }
        return allocated;
    }

    /**
     * Find up to the specified number of existing free inbound tunnels that meet
     * the client's conditions.
     *
     * @return list of TunnelId values of qualified tunnels
     */
    private List selectGoodEnough(int numTunnels) {
        // keyed by negated expiration so the latest-expiring tunnels come first.
        // NOTE(review): tunnels with identical expirations collide on the key and
        // only one survives - harmless for selection, but worth knowing
        TreeMap rv = new TreeMap();
        int maxLength = _tunnelPool.getLongestTunnelLength();
        Iterator iter = _tunnelPool.getFreeTunnels().iterator();
        while(iter.hasNext() && rv.size() < numTunnels) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _tunnelPool.getFreeTunnel(id);
            if (info != null) {
                if (isGoodEnough(info, maxLength)) {
                    rv.put(new Long(0 - info.getSettings().getExpiration()), id);
                }
            }
        }
        return new ArrayList(rv.values());
    }

    /**
     * Determine if the tunnel will meet the client's requirements.
     *
     */
    private boolean isGoodEnough(TunnelInfo info, int max) {
        if (!info.getIsReady()) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Refusing tunnel " + info.getTunnelId() + " because it isn't ready");
            return false;
        }
        if (info.getSettings() == null) {
            // defensive: sibling pool code treats null settings as an error condition
            if (_log.shouldLog(Log.WARN))
                _log.warn("Refusing tunnel " + info.getTunnelId() + " because it has no settings");
            return false;
        }
        // (furthest in the future) - (rebuild buffer time)
        long expireAfter = getContext().clock().now() + _tunnelPool.getPoolSettings().getInboundDuration()
                           - POOL_CHECK_DELAY - _tunnelPool.getTunnelCreationTimeout()*2;
        if (info.getSettings().getExpiration() <= expireAfter) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Refusing tunnel " + info.getTunnelId() + " because it is going to expire soon ("
                           + new Date(info.getSettings().getExpiration())
                           + ", before " + new Date(expireAfter) + ")");
            return false;
        }
        int length = info.getLength() - 1; // -1 because .getLength() includes us
        if (_clientPool.getClientSettings().getEnforceStrictMinimumLength()) {
            if (length < _clientPool.getClientSettings().getDepthInbound()) {
                // we will require at least the client's length, but they dont meet it
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Strictly refusing tunnel " + info.getTunnelId()
                               + " because it is too short (length = " + length
                               + ", wanted = " + _clientPool.getClientSettings().getDepthInbound()
                               + ")");
                return false;
            }
            // its long enough. w00t
        } else {
            if (length < _clientPool.getClientSettings().getDepthInbound() && (length < max)) {
                // while we will still strive to meet the client's needs, we will be satisfied with
                // the best we have on hand (which may be less than their requested length) -
                // this tunnel however meets neither criteria
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Loosely refusing tunnel " + info.getTunnelId()
                               + " because it is too short (length = " + length
                               + ", wanted = " + _clientPool.getClientSettings().getDepthInbound()
                               + ")");
                return false;
            }
            // either its long enough, or its the longest we have.  to be strict,
            // set tunnels.enforceStrictMinimumLength=true (JVM arg or router.config)
        }
        if (info.getDestination() != null) {
            if (!_clientPool.getDestination().equals(info.getDestination())) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Refusing tunnel " + info.getTunnelId()
                              + " because it was requested specifically for another destination ["
                              + info.getDestination().calculateHash() + "]");
                return false;
            }
        }
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Accepting tunnel for length=" + _clientPool.getClientSettings().getDepthInbound() +
                       " and dest=" + _clientPool.getDestination().calculateHash().toBase64().substring(0,6)
                       + " for " + info.getTunnelId());
        return true;
    }

    /**
     * Request numTunnels more tunnels (the free pool doesnt have enough satisfactory ones).
     * This fires off a series of RequestCustomTunnelJobs
     */
    private void requestCustomTunnels(int numTunnels) {
        for (int i = 0; i < numTunnels; i++) {
            getContext().jobQueue().addJob(new RequestCustomTunnelJob(getContext()));
        }
    }

    /**
     * Request a new tunnel specifically to the client's requirements, marked as for them so other
     * ClientTunnelPool's wont take it
     *
     */
    private class RequestCustomTunnelJob extends JobImpl {
        public RequestCustomTunnelJob(RouterContext enclosingContext) {
            super(enclosingContext);
        }
        public String getName() { return "Request Custom Client Tunnel"; }
        public void runJob() {
            TunnelInfo tunnelGateway = _tunnelBuilder.configureInboundTunnel(_clientPool.getDestination(), _clientPool.getClientSettings());
            RequestTunnelJob reqJob = new RequestTunnelJob(RequestCustomTunnelJob.this.getContext(), _tunnelPool, tunnelGateway, true, _tunnelPool.getTunnelCreationTimeout());
            RequestCustomTunnelJob.this.getContext().jobQueue().addJob(reqJob);
        }
    }
}

View File

@ -1,274 +0,0 @@
package net.i2p.router.tunnelmanager;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.util.Date;
import java.util.List;
import net.i2p.data.Certificate;
import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelCreateMessage;
import net.i2p.data.i2np.TunnelCreateStatusMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageSelector;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.router.TunnelSettings;
import net.i2p.router.message.BuildTestMessageJob;
import net.i2p.router.message.GarlicConfig;
import net.i2p.router.message.GarlicMessageBuilder;
import net.i2p.router.message.PayloadGarlicConfig;
import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.util.Log;
/**
 * Handle an inbound TunnelCreateMessage: throttle-check the request, assemble
 * the local TunnelInfo for the hop we are asked to run, test reachability of
 * the next hop when we are not the endpoint, join the tunnel, and send a
 * garlic-wrapped TunnelCreateStatusMessage back through one of our outbound
 * tunnels.
 */
public class HandleTunnelCreateMessageJob extends JobImpl {
private Log _log;
// the create request being processed
private TunnelCreateMessage _message;
// identity/hash of the router the request arrived from (usage not visible here)
private RouterIdentity _from;
private Hash _fromHash;
private final static long TIMEOUT = 30*1000; // 30 secs to contact a peer that will be our next hop
private final static int PRIORITY = 123;
HandleTunnelCreateMessageJob(RouterContext ctx, TunnelCreateMessage receivedMessage,
RouterIdentity from, Hash fromHash) {
super(ctx);
_log = ctx.logManager().getLog(HandleTunnelCreateMessageJob.class);
ctx.statManager().createRateStat("tunnel.rejectOverloaded", "How many tunnels did we deny due to throttling?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_message = receivedMessage;
_from = from;
_fromHash = fromHash;
}
public void runJob() {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Handling tunnel create");
// refuse immediately if the throttle says we are overloaded
if (isOverloaded()) {
sendReply(false);
return;
}
// copy the requested parameters out of the message into our local tunnel state
TunnelInfo info = new TunnelInfo(getContext());
info.setConfigurationKey(_message.getConfigurationKey());
info.setEncryptionKey(_message.getTunnelKey());
info.setNextHop(_message.getNextRouter());
info.setNextHopId(_message.getNextTunnelId());
TunnelSettings settings = new TunnelSettings(getContext());
settings.setBytesPerMinuteAverage(_message.getMaxAvgBytesPerMin());
settings.setBytesPerMinutePeak(_message.getMaxPeakBytesPerMin());
settings.setMessagesPerMinuteAverage(_message.getMaxAvgMessagesPerMin());
settings.setMessagesPerMinutePeak(_message.getMaxPeakMessagesPerMin());
// duration arrives in seconds; expiration is stored as an absolute ms timestamp
settings.setExpiration(_message.getTunnelDurationSeconds()*1000+getContext().clock().now());
settings.setIncludeDummy(_message.getIncludeDummyTraffic());
settings.setReorder(_message.getReorderMessages());
info.setSettings(settings);
info.setSigningKey(_message.getVerificationPrivateKey());
info.setThisHop(getContext().routerHash());
info.setTunnelId(_message.getTunnelId());
info.setVerificationKey(_message.getVerificationPublicKey());
info.getTunnelId().setType(TunnelId.TYPE_PARTICIPANT);
if (_message.getNextRouter() == null) {
// no next hop means we would be the tunnel endpoint - nothing to test, just join
if (_log.shouldLog(Log.DEBUG)) _log.debug("We're the endpoint, don't test the \"next\" peer [duh]");
boolean ok = getContext().tunnelManager().joinTunnel(info);
sendReply(ok);
} else {
// look up the next hop: on success run TestJob, on lookup failure refuse via JoinJob(false)
getContext().netDb().lookupRouterInfo(info.getNextHop(), new TestJob(getContext(), info), new JoinJob(getContext(), info, false), TIMEOUT);
}
}
/** @return true (and bump the reject stat) when the throttle refuses the request */
private boolean isOverloaded() {
boolean shouldAccept = getContext().throttle().acceptTunnelRequest(_message);
if (!shouldAccept) {
getContext().statManager().addRateData("tunnel.rejectOverloaded", 1, 1);
if (_log.shouldLog(Log.INFO))
_log.info("Refusing tunnel request due to overload");
}
return !shouldAccept;
}
/**
 * After a successful netDb lookup of the next hop, fire a BuildTestMessageJob
 * to verify the peer is actually reachable before we agree to join; the test's
 * outcome is delivered to a success or failure JoinJob.
 */
private class TestJob extends JobImpl {
private TunnelInfo _target;
public TestJob(RouterContext enclosingContext, TunnelInfo target) {
super(enclosingContext);
_target = target;
}
public String getName() { return "Run a test for peer reachability"; }
public void runJob() {
RouterInfo info = TestJob.this.getContext().netDb().lookupRouterInfoLocally(_target.getNextHop());
if (info == null) {
// shouldn't happen: this job was queued as the lookup's onSuccess callback.
// NOTE(review): _target is a TunnelInfo - confirm _target.toBase64() is the
// intended identifier here (vs. _target.getNextHop().toBase64())
if (_log.shouldLog(Log.ERROR))
_log.error("Error - unable to look up peer " + _target.toBase64() + ", even though we were queued up via onSuccess??");
return;
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Lookup successful for tested peer " + _target.toBase64() + ", now continue with the test");
Hash peer = TestJob.this.getContext().routerHash();
JoinJob success = new JoinJob(getContext(), _target, true);
JoinJob failure = new JoinJob(getContext(), _target, false);
BuildTestMessageJob test = new BuildTestMessageJob(TestJob.this.getContext(), info, peer, success, failure, TIMEOUT, PRIORITY);
TestJob.this.getContext().jobQueue().addJob(test);
}
}
}
/**
 * Send a garlic-wrapped TunnelCreateStatusMessage (success or failure) back to
 * the requestor through one of our outbound tunnels.
 */
private void sendReply(boolean ok) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending reply to a tunnel create of id " + _message.getTunnelId()
+ " with ok (" + ok + ") to tunnel " + _message.getReplyTunnel()
+ " on router " + _message.getReplyPeer());
getContext().messageHistory().receiveTunnelCreate(_message.getTunnelId(), _message.getNextRouter(),
new Date(getContext().clock().now() + 1000*_message.getTunnelDurationSeconds()),
ok, _message.getReplyPeer());
TunnelCreateStatusMessage msg = new TunnelCreateStatusMessage(getContext());
msg.setFromHash(getContext().routerHash());
msg.setTunnelId(_message.getTunnelId());
if (ok) {
msg.setStatus(TunnelCreateStatusMessage.STATUS_SUCCESS);
} else {
// since we don't actually check anything, this is a catch all
msg.setStatus(TunnelCreateStatusMessage.STATUS_FAILED_OVERLOADED);
}
msg.setMessageExpiration(new Date(getContext().clock().now()+TIMEOUT));
// put that message into a garlic
GarlicMessage reply = createReply(msg);
TunnelId outTunnelId = selectReplyTunnel();
if (outTunnelId == null) {
// no outbound tunnel available - the reply is silently dropped
if (_log.shouldLog(Log.ERROR))
_log.error("No tunnel to send reply through");
return;
}
SendTunnelMessageJob job = new SendTunnelMessageJob(getContext(), reply, outTunnelId,
_message.getReplyPeer(),
_message.getReplyTunnel(),
(Job)null, (ReplyJob)null,
(Job)null, (MessageSelector)null,
TIMEOUT, PRIORITY);
getContext().jobQueue().addJob(job);
}
/** wrap the status message in a garlic built with the requestor's reply key/tag */
private GarlicMessage createReply(TunnelCreateStatusMessage body) {
GarlicConfig cfg = createReplyConfig(body);
return GarlicMessageBuilder.buildMessage(getContext(), cfg, null, null, null,
_message.getReplyKey(), _message.getReplyTag());
}
/** garlic config holding the single reply clove, delivered locally at the far end */
private GarlicConfig createReplyConfig(TunnelCreateStatusMessage body) {
GarlicConfig config = new GarlicConfig();
PayloadGarlicConfig replyClove = buildReplyClove(body);
config.addClove(replyClove);
DeliveryInstructions instructions = new DeliveryInstructions();
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
instructions.setDelayRequested(false);
instructions.setDelaySeconds(0);
instructions.setEncrypted(false);
instructions.setEncryptionKey(null);
instructions.setRouter(null);
instructions.setTunnelId(null);
config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
config.setDeliveryInstructions(instructions);
config.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
config.setExpiration(TIMEOUT+getContext().clock().now());
config.setRecipient(null);
config.setRequestAck(false);
return config;
}
/**
 * Build a clove that sends the tunnel create reply
 */
private PayloadGarlicConfig buildReplyClove(TunnelCreateStatusMessage body) {
PayloadGarlicConfig replyClove = new PayloadGarlicConfig();
DeliveryInstructions instructions = new DeliveryInstructions();
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
instructions.setRouter(null);
instructions.setDelayRequested(false);
instructions.setDelaySeconds(0);
instructions.setEncrypted(false);
replyClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
replyClove.setDeliveryInstructions(instructions);
replyClove.setExpiration(TIMEOUT+getContext().clock().now());
replyClove.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
replyClove.setPayload(body);
replyClove.setRecipient(null);
replyClove.setRequestAck(false);
return replyClove;
}
/** pick one of our outbound tunnels to carry the reply, or null if none available */
private TunnelId selectReplyTunnel() {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMinimumTunnelsRequired(1);
crit.setMaximumTunnelsRequired(1);
List ids = getContext().tunnelManager().selectOutboundTunnelIds(crit);
if ( (ids != null) && (ids.size() > 0) )
return (TunnelId)ids.get(0);
else
return null;
}
public String getName() { return "Handle Tunnel Create Message"; }
/**
 * Complete the join (or refusal) after the next-hop test finishes and send
 * the reply; isReachable==false means the test failed, so we refuse.
 */
private class JoinJob extends JobImpl {
private TunnelInfo _info;
private boolean _isReachable;
public JoinJob(RouterContext enclosingContext, TunnelInfo info, boolean isReachable) {
super(enclosingContext);
_info = info;
_isReachable = isReachable;
}
public void runJob() {
if (!_isReachable) {
long before = JoinJob.this.getContext().clock().now();
sendReply(false);
long after = JoinJob.this.getContext().clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("JoinJob .refuse took " + (after-before) + "ms to refuse " + _info);
} else {
long before = JoinJob.this.getContext().clock().now();
boolean ok = JoinJob.this.getContext().tunnelManager().joinTunnel(_info);
long afterJoin = JoinJob.this.getContext().clock().now();
sendReply(ok);
long after = JoinJob.this.getContext().clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("JoinJob .joinTunnel took " + (afterJoin-before) + "ms and sendReply took " + (after-afterJoin) + "ms");
}
}
public String getName() { return "Process the tunnel join after testing the nextHop"; }
}
/** record the drop in message history when this job is discarded under load */
public void dropped() {
getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Dropped due to overload");
}
}

View File

@ -1,248 +0,0 @@
package net.i2p.router.tunnelmanager;
import java.io.IOException;
import java.io.Writer;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.TunnelCreateMessage;
import net.i2p.router.ClientTunnelSettings;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelManagerFacade;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.util.Log;
/**
* Main interface to the pool
*
*/
/**
 * Main interface to the pool
 *
 * Implements TunnelManagerFacade on top of a TunnelPool: joining tunnels we
 * are asked to participate in, selecting inbound/outbound tunnels for use,
 * creating client tunnel pools, and reacting to peer failures.
 */
public class PoolingTunnelManagerFacade implements TunnelManagerFacade {
    private Log _log;
    private TunnelPool _pool;
    private TunnelTestManager _testManager;
    private RouterContext _context;
    private PoolingTunnelSelector _selector;

    public PoolingTunnelManagerFacade(RouterContext context) {
        if (context == null) throw new IllegalArgumentException("Null routerContext is not supported");
        _context = context;
        _log = context.logManager().getLog(PoolingTunnelManagerFacade.class);
        _context.statManager().createFrequencyStat("tunnel.acceptRequestFrequency", "How often do we accept requests to join a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        _context.statManager().createFrequencyStat("tunnel.rejectRequestFrequency", "How often do we reject requests to join a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        _context.statManager().createRateStat("tunnel.participatingTunnels", "How many tunnels are we participating in?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        _context.inNetMessagePool().registerHandlerJobBuilder(TunnelCreateMessage.MESSAGE_TYPE, new TunnelCreateMessageHandler(_context));
        _selector = new PoolingTunnelSelector(context);
    }

    public void startup() {
        if (_pool == null) {
            _pool = new TunnelPool(_context);
            if (_log.shouldLog(Log.DEBUG))
                _log.debug(toString() + ": New tunnel pool created: " + _pool.toString());
        }
        _pool.startup();
        _testManager = new TunnelTestManager(_context, _pool);
    }

    public void shutdown() {
        _pool.shutdown();
        _testManager.stopTesting();
        _testManager = null;
    }

    public void restart() {
        _pool.restart();
    }

    /**
     * React to a request to join the specified tunnel.
     *
     * @return true if the router will accept participation, else false.
     */
    public boolean joinTunnel(TunnelInfo info) {
        if (info == null) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Null tunnel", new Exception("Null tunnel"));
            _context.statManager().updateFrequency("tunnel.rejectRequestFrequency");
            return false;
        }
        if (info.getSettings() == null) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Null settings!", new Exception("settings are null"));
            _context.statManager().updateFrequency("tunnel.rejectRequestFrequency");
            return false;
        }
        if (info.getSettings().getExpiration() == 0) {
            if (_log.shouldLog(Log.INFO))
                _log.info("No expiration for tunnel " + info.getTunnelId().getTunnelId(),
                          new Exception("No expiration"));
            _context.statManager().updateFrequency("tunnel.rejectRequestFrequency");
            return false;
        } else if (info.getSettings().getExpiration() < _context.clock().now()) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Already expired - " + new Date(info.getSettings().getExpiration()),
                          new Exception("Already expired"));
            _context.statManager().updateFrequency("tunnel.rejectRequestFrequency");
            return false;
        }
        boolean ok = _pool.addParticipatingTunnel(info);
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Joining tunnel (" + ok + "): " + info);
        if (!ok)
            _context.statManager().updateFrequency("tunnel.rejectRequestFrequency");
        else
            _context.statManager().updateFrequency("tunnel.acceptRequestFrequency");
        _context.statManager().addRateData("tunnel.participatingTunnels", _pool.getParticipatingTunnelCount(), 0);
        return ok;
    }

    /**
     * Retrieve the information related to a particular tunnel
     *
     */
    public TunnelInfo getTunnelInfo(TunnelId id) {
        return _pool.getTunnelInfo(id);
    }

    /**
     * Retrieve a set of tunnels from the existing ones for various purposes
     */
    public List selectOutboundTunnelIds(TunnelSelectionCriteria criteria) {
        return _selector.selectOutboundTunnelIds(_pool, criteria);
    }

    /**
     * Retrieve a set of tunnels from the existing ones for various purposes
     */
    public List selectInboundTunnelIds(TunnelSelectionCriteria criteria) {
        return _selector.selectInboundTunnelIds(_pool, criteria);
    }

    /**
     * Make sure appropriate outbound tunnels are in place, builds requested
     * inbound tunnels, then fire off a job to ask the ClientManagerFacade to
     * validate the leaseSet, then publish it in the network database.
     *
     * NOTE(review): timeoutMs is currently unused here - confirm whether callers
     * expect it to bound pool creation.
     */
    public void createTunnels(Destination destination, ClientTunnelSettings clientSettings, long timeoutMs) {
        ClientTunnelPool pool = _pool.getClientPool(destination);
        if (pool != null) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("createTunnels for destination " + destination.calculateHash().toBase64() + " where the client pool exists");
            pool.setClientSettings(clientSettings);
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("createTunnels for destination " + destination.calculateHash().toBase64() + " where the client pool does NOT exist");
            _pool.createClientPool(destination, clientSettings);
        }
    }

    /**
     * Called when a peer becomes unreachable - go through all of the current
     * tunnels and count the ones the failed peer participates in.
     *
     * NOTE(review): shouldKill is hard-coded to false, so failing tunnels are
     * only counted/logged, never actually marked unready - confirm this is the
     * intended post-merge behavior.
     */
    public void peerFailed(Hash peer) {
        if (_pool == null) return; // just initialized
        int numFailed = 0;
        boolean shouldKill = false;
        for (Iterator iter = _pool.getManagedTunnelIds().iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getTunnelInfo(id);
            if (isParticipant(info, peer)) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Peer " + peer.toBase64() + " failed and they participate in tunnel "
                              + id.getTunnelId() + ". Marking the tunnel as not ready!");
                if (shouldKill) {
                    info.setIsReady(false);
                    long lifetime = _context.clock().now() - info.getCreated();
                    _context.statManager().addRateData("tunnel.failAfterTime", lifetime, lifetime);
                }
                numFailed++;
            }
        }
        if (_log.shouldLog(Log.INFO))
            _log.info("On peer " + peer.toBase64() + " failure, " + numFailed + " tunnels were "
                      + (shouldKill ? "" : "NOT ") + "killed");
    }

    /** @return true if the peer appears as a hop anywhere along the given tunnel chain */
    private boolean isParticipant(TunnelInfo info, Hash peer) {
        if ( (info == null) || (peer == null) ) return false;
        TunnelInfo cur = info;
        while (cur != null) {
            if (peer.equals(cur.getThisHop())) return true;
            if (peer.equals(cur.getNextHop())) return true;
            cur = cur.getNextHopInfo();
        }
        return false;
    }

    /**
     * True if the peer currently part of a tunnel
     *
     */
    public boolean isInUse(Hash peer) {
        // fix: the guards previously checked shouldLog(Log.INFO) while logging at
        // debug level - align the guards with the actual log calls
        if (isInUse(peer, _pool.getManagedTunnelIds())) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Peer is in a managed tunnel: " + peer.toBase64());
            return true;
        }
        if (isInUse(peer, _pool.getPendingTunnels())) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Peer is in a pending tunnel: " + peer.toBase64());
            return true;
        }
        if (isInUse(peer, _pool.getParticipatingTunnels())) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Peer is in a participating tunnel: " + peer.toBase64());
            return true;
        }
        return false;
    }

    private boolean isInUse(Hash peer, Set tunnelIds) {
        for (Iterator iter = tunnelIds.iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getTunnelInfo(id);
            if (isParticipant(info, peer))
                return true;
        }
        return false;
    }

    public int getParticipatingCount() {
        return _pool.getParticipatingTunnelCount();
    }
    public int getFreeTunnelCount() {
        return _pool.getFreeTunnelCount();
    }
    public int getOutboundTunnelCount() {
        return _pool.getOutboundTunnelCount();
    }

    /**
     * Aint she pretty?
     *
     */
    public void renderStatusHTML(Writer out) throws IOException {
        if (_pool != null)
            _pool.renderStatusHTML(out);
    }

    /** @return the latest expiration among participating tunnels, or -1 if none */
    public long getLastParticipatingExpiration() {
        long last = -1;
        for (Iterator iter = _pool.getParticipatingTunnels().iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getParticipatingTunnel(id);
            // fix: also guard against null settings to avoid an NPE
            if ( (info != null) && (info.getSettings() != null)
                 && (info.getSettings().getExpiration() > last) )
                last = info.getSettings().getExpiration();
        }
        return last;
    }
}

View File

@ -1,167 +0,0 @@
package net.i2p.router.tunnelmanager;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import net.i2p.data.TunnelId;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.util.Log;
/**
* Implement the tunnel selection algorithms
*
*/
/**
 * Implement the tunnel selection algorithms
 *
 * Chooses ready, not-about-to-expire tunnels from a TunnelPool, building
 * fake (zero-hop) tunnels as a fallback when not enough are available.
 */
class PoolingTunnelSelector {
private Log _log;
private RouterContext _context;
/** don't use a tunnel thats about to expire */
private static long POOL_USE_SAFETY_MARGIN = 10*1000;
public PoolingTunnelSelector(RouterContext context) {
_context = context;
_log = context.logManager().getLog(PoolingTunnelSelector.class);
}
public List selectOutboundTunnelIds(TunnelPool pool, TunnelSelectionCriteria criteria) {
return selectOutboundTunnelIds(pool, criteria, true);
}
/**
 * Select up to the minimum required number of ready outbound tunnels.
 *
 * @param recurse if true and fake tunnels had to be built, retry the selection once
 * @return randomized list of TunnelId, trimmed to the minimum required count
 */
public List selectOutboundTunnelIds(TunnelPool pool, TunnelSelectionCriteria criteria, boolean recurse) {
List tunnelIds = new ArrayList(criteria.getMinimumTunnelsRequired());
Set outIds = pool.getOutboundTunnels();
for (Iterator iter = outIds.iterator(); iter.hasNext(); ) {
TunnelId id = (TunnelId)iter.next();
TunnelInfo info = pool.getOutboundTunnel(id);
if ( (info != null) && (info.getIsReady()) ) {
if (isAlmostExpired(pool, id, POOL_USE_SAFETY_MARGIN)) {
if (_log.shouldLog(Log.INFO))
_log.info("Tunnel " + id + " is almost expired");
} else {
tunnelIds.add(id);
}
} else {
if (info == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Outbound tunnel " + id + " was not found?! expire race perhaps?");
} else {
// NOTE(review): getSettings() is dereferenced here without the null check
// that isAlmostExpired() performs - confirm settings can never be null
if (_log.shouldLog(Log.DEBUG))
_log.debug("Outbound tunnel " + id + " was not ready?! " + new Date(info.getSettings().getExpiration()));
}
}
}
// came up short: build fakes (loop body ignores i - it simply runs once per
// missing tunnel), then re-select a single time
boolean rebuilt = false;
for (int i = outIds.size(); i < criteria.getMinimumTunnelsRequired(); i++) {
if (_log.shouldLog(Log.WARN))
_log.warn("Building fake tunnels because the outbound tunnels weren't sufficient");
pool.buildFakeTunnels(true);
rebuilt = true;
}
if (rebuilt && recurse)
return selectOutboundTunnelIds(pool, criteria, false);
// randomize, then trim to the requested count
List ordered = randomize(pool, tunnelIds);
List rv = new ArrayList(criteria.getMinimumTunnelsRequired());
for (Iterator iter = ordered.iterator(); iter.hasNext() && (rv.size() < criteria.getMinimumTunnelsRequired()); ) {
rv.add(iter.next());
}
if (_log.shouldLog(Log.INFO))
_log.info("Selecting outbound tunnelIds [all outbound tunnels: " + outIds.size()
+ ", tunnelIds ready: " + ordered.size() + ", rv: " + rv + "]");
return rv;
}
public List selectInboundTunnelIds(TunnelPool pool, TunnelSelectionCriteria criteria) {
return selectInboundTunnelIds(pool, criteria, true);
}
/**
 * Select up to the minimum required number of ready free inbound tunnels.
 *
 * @param recurse if true and fake tunnels had to be built, retry the selection once
 * @return randomized list of TunnelId, trimmed to the minimum required count
 */
public List selectInboundTunnelIds(TunnelPool pool, TunnelSelectionCriteria criteria, boolean recurse) {
List tunnels = new ArrayList(criteria.getMinimumTunnelsRequired());
for (Iterator iter = pool.getFreeTunnels().iterator(); iter.hasNext(); ) {
TunnelId id = (TunnelId)iter.next();
TunnelInfo info = pool.getFreeTunnel(id);
if (info == null) continue;
if (info.getIsReady()) {
if (isAlmostExpired(pool, id, POOL_USE_SAFETY_MARGIN)) {
if (_log.shouldLog(Log.INFO))
_log.info("Tunnel " + id + " is almost expired");
} else {
tunnels.add(id);
}
} else {
// NOTE(review): same unguarded getSettings() dereference as above
if (_log.shouldLog(Log.DEBUG))
_log.debug("Inbound tunnel " + id + " is not ready?! "
+ new Date(info.getSettings().getExpiration()));
}
}
boolean rebuilt = false;
for (int i = tunnels.size(); i < criteria.getMinimumTunnelsRequired(); i++) {
if (_log.shouldLog(Log.WARN))
_log.warn("Building fake tunnels because the inbound tunnels weren't sufficient");
pool.buildFakeTunnels(true);
rebuilt = true;
}
if (rebuilt && recurse)
return selectInboundTunnelIds(pool, criteria, false);
List ordered = randomize(pool, tunnels);
List rv = new ArrayList(criteria.getMinimumTunnelsRequired());
for (Iterator iter = ordered.iterator(); iter.hasNext() && (rv.size() < criteria.getMinimumTunnelsRequired()); ) {
rv.add(iter.next());
}
if (_log.shouldLog(Log.INFO))
_log.info("Selecting inbound tunnelIds [tunnelIds ready: "
+ tunnels.size() + ", rv: " + rv + "]");
return rv;
}
////
// helpers
////
/** copy the ids into a fresh list and shuffle it (the pool param is unused) */
private List randomize(TunnelPool pool, List tunnelIds) {
List rv = new ArrayList(tunnelIds.size());
for (Iterator iter = tunnelIds.iterator(); iter.hasNext(); ) {
TunnelId id = (TunnelId)iter.next();
rv.add(id);
}
Collections.shuffle(rv, _context.random());
return rv;
}
/**
 * True when the tunnel is unknown, lacks settings or an expiration, or will
 * expire within safetyMargin ms from now.
 */
private boolean isAlmostExpired(TunnelPool pool, TunnelId id, long safetyMargin) {
TunnelInfo info = pool.getTunnelInfo(id);
if (info == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Tunnel " + id.getTunnelId() + " is not known");
return true;
}
if (info.getSettings() == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Tunnel " + id.getTunnelId() + " has no settings");
return true;
}
if (info.getSettings().getExpiration() <= 0) {
if (_log.shouldLog(Log.ERROR))
_log.error("Tunnel " + id.getTunnelId() + " has no expiration");
return true;
}
if (info.getSettings().getExpiration() - safetyMargin <= _context.clock().now()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Expiration of tunnel " + id.getTunnelId()
+ " has almost been reached ["
+ new Date(info.getSettings().getExpiration()) + "]");
return true;
} else {
return false;
}
}
}

View File

@ -1,32 +0,0 @@
package net.i2p.router.tunnelmanager;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
 * Job that kicks off construction of a new inbound tunnel for the given
 * pool, delegating the actual per-hop negotiation to RequestTunnelJob.
 */
class RequestInboundTunnelJob extends JobImpl {
    private Log _log;
    private TunnelPool _pool;
    private boolean _useFake;       // if true, build a zero-hop (fake) tunnel
    private TunnelBuilder _builder;

    public RequestInboundTunnelJob(RouterContext context, TunnelPool pool) {
        this(context, pool, false);
    }
    public RequestInboundTunnelJob(RouterContext context, TunnelPool pool, boolean useFake) {
        super(context);
        _log = context.logManager().getLog(RequestInboundTunnelJob.class);
        _pool = pool;
        _useFake = useFake;
        _builder = new TunnelBuilder(context);
    }
    public String getName() { return "Request Inbound Tunnel"; }
    public void runJob() {
        // guard the debug statement so we don't pay for the string
        // concatenation when debug logging is disabled (consistent with
        // the logging style used everywhere else in this package)
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Client pool settings: " + _pool.getPoolSettings().toString());
        TunnelInfo tunnelGateway = _builder.configureInboundTunnel(null, _pool.getPoolSettings(), _useFake);
        RequestTunnelJob reqJob = new RequestTunnelJob(getContext(), _pool, tunnelGateway, true, _pool.getTunnelCreationTimeout());
        getContext().jobQueue().addJob(reqJob);
    }
}

View File

@ -1,25 +0,0 @@
package net.i2p.router.tunnelmanager;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
/**
 * Job that kicks off construction of a new outbound tunnel for the given
 * pool, handing the per-hop negotiation off to RequestTunnelJob.
 */
class RequestOutboundTunnelJob extends JobImpl {
    private TunnelPool _pool;
    private boolean _useFake;       // if true, build a zero-hop (fake) tunnel
    private TunnelBuilder _builder;

    public RequestOutboundTunnelJob(RouterContext context, TunnelPool pool, boolean useFake) {
        super(context);
        _builder = new TunnelBuilder(context);
        _pool = pool;
        _useFake = useFake;
    }
    public String getName() { return "Request Outbound Tunnel"; }
    public void runJob() {
        // configure the hop chain, then queue up the actual request job
        TunnelInfo gateway = _builder.configureOutboundTunnel(_pool.getPoolSettings(), _useFake);
        RequestTunnelJob request = new RequestTunnelJob(getContext(), _pool, gateway, false, _pool.getTunnelCreationTimeout());
        getContext().jobQueue().addJob(request);
    }
}

View File

@ -1,748 +0,0 @@
package net.i2p.router.tunnelmanager;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.util.ArrayList;
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import net.i2p.data.Certificate;
import net.i2p.data.PublicKey;
import net.i2p.data.RouterInfo;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelCreateMessage;
import net.i2p.data.i2np.TunnelCreateStatusMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageHistory;
import net.i2p.router.MessageSelector;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.router.message.GarlicConfig;
import net.i2p.router.message.GarlicMessageBuilder;
import net.i2p.router.message.PayloadGarlicConfig;
import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.util.Log;
/**
 * Request the creation of a new tunnel by asking each would-be participant,
 * one at a time (endpoint first, working back toward the gateway), to join.
 * Each peer is sent a garlic-wrapped TunnelCreateMessage through one of our
 * outbound tunnels, with the reply routed back through one of our inbound
 * tunnels.  Success of the whole request marks the tunnel ready in the pool;
 * any per-peer timeout or rejection fails the whole tunnel.
 */
public class RequestTunnelJob extends JobImpl {
    private Log _log;
    private TunnelPool _pool;
    // true once the tunnel has been fully built or has failed
    private boolean _complete;
    // how long (ms) we allow for each individual participant's reply
    private long _timeoutMs;
    // absolute time after which the whole request is abandoned
    private long _expiration;
    // head of the hop chain being built (gateway hop for inbound tunnels)
    private TunnelInfo _tunnelGateway;
    private List _toBeRequested; // list of participants, from endpoint to gateway
    private Set _failedTunnelParticipants; // set of Hash of the RouterIdentity of participants who timed out or rejected
    private boolean _isInbound;
    private final static int PRIORITY = 300; // high since we are creating tunnels for a client

    RequestTunnelJob(RouterContext context, TunnelPool pool, TunnelInfo tunnelGateway, boolean isInbound, long timeoutMs) {
        super(context);
        _log = context.logManager().getLog(RequestTunnelJob.class);
        context.statManager().createFrequencyStat("tunnel.buildFrequency", "How often does the router build a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        context.statManager().createFrequencyStat("tunnel.buildFailFrequency", "How often does a peer in the tunnel fail to join??", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        _pool = pool;
        _tunnelGateway = tunnelGateway;
        _timeoutMs = timeoutMs;
        _expiration = -1;
        _isInbound = isInbound;
        _failedTunnelParticipants = new HashSet();
        _complete = false;

        // walk the hop chain from the gateway toward the endpoint
        List participants = new ArrayList();
        TunnelInfo cur = _tunnelGateway;
        while (cur != null) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Tunnel " + cur.getTunnelId() + " includes " + cur.getThisHop().toBase64());
            participants.add(cur);
            cur = cur.getNextHopInfo();
        }

        if (isInbound) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Requesting inbound tunnel " + _tunnelGateway.getTunnelId() + " with "
                          + participants.size() + " participants in it");
        } else {
            if (_log.shouldLog(Log.INFO))
                _log.info("Requesting outbound tunnel " + _tunnelGateway.getTunnelId() + " with " + participants.size() + " participants in it");
        }

        // work backwards (end point, then the router pointing at the endpoint, then the router pointing at that, etc, until the gateway
        _toBeRequested = new ArrayList(participants.size());
        for (int i = participants.size()-1; i >= 0; i--) {
            TunnelInfo peer = (TunnelInfo)participants.get(i);
            // only ask routers we have a RouterInfo for locally; unknown
            // routers are silently dropped from the request list
            if (null != getContext().netDb().lookupRouterInfoLocally(peer.getThisHop())) {
                _toBeRequested.add(participants.get(i));
            } else {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("ok who the fuck requested someone we don't know about? (dont answer that");
            }
        }

        // since we request serially, we need to up the timeout serially
        // change this once we go parallel
        //_timeoutMs *= participants.size()+1;
        _expiration = (_timeoutMs * _toBeRequested.size()) + getContext().clock().now();
    }

    public String getName() { return "Request Tunnel"; }

    /**
     * Pull the next pending participant off the list and ask them to join.
     * This job is re-queued by peerSuccess() after each successful hop until
     * the list is drained.
     */
    public void runJob() {
        if (getContext().clock().now() > _expiration) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Timeout reached building tunnel (timeout = " + _timeoutMs + " expiration = " + new Date(_expiration) + ")");
            fail();
            return;
        }

        TunnelInfo peer = null;
        synchronized (_toBeRequested) {
            if (_toBeRequested.size() > 0) {
                _pool.addPendingTunnel(_tunnelGateway);
                peer = (TunnelInfo)_toBeRequested.remove(0);
                if ( (peer == null) || (peer.getThisHop() == null) ) {
                    return;
                } else {
                    // jump out of the synchronized block to request
                }
            }
        }

        if (peer != null)
            requestParticipation(peer);
    }

    /**
     * Ask a single peer to join the tunnel: short-circuit if the peer is us,
     * otherwise build the request state (create message + garlic) and queue
     * a Request job to push it through the stages.
     */
    private void requestParticipation(TunnelInfo participant) {
        // find the info about who we're looking for
        RouterInfo target = getContext().netDb().lookupRouterInfoLocally(participant.getThisHop());
        if (target == null) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Error - no db info known for participant " + participant.getThisHop());
            fail();
            return;
        }

        if (target.getIdentity().getHash().equals(getContext().routerHash())) {
            // short circuit the ok
            okLocalParticipation(participant);
            return;
        }

        // select send method [outbound tunnel or garlic through peers]
        TunnelId outboundTunnel = selectOutboundTunnel();
        if (outboundTunnel == null) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("No outbound tunnels! unable to request a new tunnel!");
            fail();
            return;
        }

        // select inbound tunnel gateway
        TunnelGateway inboundGateway = selectInboundGateway(participant);
        if (inboundGateway == null) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Unable to find an inbound gateway");
            fail();
            return;
        }

        // out-params filled in when the garlic is built, so on success we can
        // record which session tags were delivered to whom
        SessionKey wrappedKey = new SessionKey();
        Set wrappedTags = new HashSet(64);
        PublicKey wrappedTo = new PublicKey();

        RequestState state = new RequestState(wrappedKey, wrappedTags, wrappedTo,
                                              participant, inboundGateway,
                                              outboundTunnel, target);
        Request r = new Request(getContext(), state);
        getContext().jobQueue().addJob(r);
    }

    /**
     * The request job steps through the RequestState, pushing it along one pass
     * at a time, all with each pass occurring as a separate sequential job.  This
     * is useful since the RequestTunnelJob can otherwise take upwards of 3+ seconds,
     * since the various steps may involve full ElGamal encryption (for source route
     * blocks, the garlic, etc).
     */
    public class Request extends JobImpl {
        private RequestState _state;
        Request(RouterContext enclosingContext, RequestState state) {
            super(enclosingContext);
            _state = state;
        }
        public void runJob() {
            boolean needsMore = _state.doNext();
            if (needsMore) {
                // more stages left - run again on the next pass
                requeue(0);
            } else {
                // request has been sent - record it in the message history
                MessageHistory hist = Request.this.getContext().messageHistory();
                hist.requestTunnelCreate(_tunnelGateway.getTunnelId(),
                                         _state.getOutboundTunnel(),
                                         _state.getParticipant().getThisHop(),
                                         _state.getParticipant().getNextHop(),
                                         _state.getInboundGateway().getTunnelId(),
                                         _state.getInboundGateway().getGateway());
            }
        }
        public String getName() { return "Request Tunnel (partial)"; }
    }

    /**
     * Contain the partial state for preparing the request - doNext starts by
     * building a TunnelCreateMessage, and on the next pass it builds a
     * DeliveryStatusMessage, and on the pass after that, it builds a GarlicMessage
     * containing those two, and on its final pass, it sends everything out through
     * a tunnel with appropriate handling jobs
     *
     */
    private class RequestState {
        private SessionKey _wrappedKey;
        private Set _wrappedTags;
        private PublicKey _wrappedTo;
        private TunnelCreateMessage _createMsg;
        private GarlicMessage _garlicMessage;
        private TunnelInfo _participant;
        private TunnelGateway _inboundGateway;
        private TunnelId _outboundTunnel;
        private RouterInfo _target;

        public RequestState(SessionKey wrappedKey, Set wrappedTags, PublicKey wrappedTo,
                            TunnelInfo participant, TunnelGateway inboundGateway,
                            TunnelId outboundTunnel, RouterInfo target) {
            _wrappedKey = wrappedKey;
            _wrappedTags = wrappedTags;
            _wrappedTo = wrappedTo;
            _participant = participant;
            _inboundGateway = inboundGateway;
            _outboundTunnel = outboundTunnel;
            _target = target;
        }

        public TunnelId getOutboundTunnel() { return _outboundTunnel; }
        public TunnelInfo getParticipant() { return _participant; }
        public TunnelGateway getInboundGateway() { return _inboundGateway; }

        /** @return true if another pass is needed, false once the request has been sent */
        public boolean doNext() {
            if (_createMsg == null) {
                _createMsg = buildTunnelCreate(_participant, _inboundGateway);
                return true;
            } else if (_garlicMessage == null) {
                _garlicMessage = buildGarlicMessage(_createMsg, _inboundGateway, _target,
                                                    _wrappedKey, _wrappedTags, _wrappedTo);
                return true;
            } else {
                // send the GarlicMessage
                if (_log.shouldLog(Log.INFO))
                    _log.info("Sending tunnel create to " + _target.getIdentity().getHash().toBase64() +
                              " to inbound gateway " + _inboundGateway.getGateway().toBase64() +
                              " : " + _inboundGateway.getTunnelId().getTunnelId());
                ReplyJob onReply = new Success(getContext(), _participant, _wrappedKey, _wrappedTags, _wrappedTo, _inboundGateway.getTunnelId(), _outboundTunnel);
                Job onFail = new Failure(getContext(), _participant, _inboundGateway.getTunnelId(), _outboundTunnel);
                MessageSelector selector = new Selector(_participant);
                SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), _garlicMessage,
                                                                 _outboundTunnel, _target.getIdentity().getHash(),
                                                                 null, null, onReply, onFail,
                                                                 selector, _timeoutMs, PRIORITY);
                getContext().jobQueue().addJob(j);
                return false;
            }
        }
    }

    /**
     * Handle the "will you participate" request that we would send to ourselves in a special case (aka fast) manner,
     * as, chances are, we'll always agree ;)
     *
     */
    private void okLocalParticipation(TunnelInfo info) {
        if (_log.shouldLog(Log.INFO))
            _log.info("Short circuiting the local join to tunnel " + info.getTunnelId());
        peerSuccess(info);
    }

    /**
     * Select an outbound tunnel for sending the tunnel create status message
     *
     * @return tunnel id, or null if no outbound tunnels are available
     */
    private TunnelId selectOutboundTunnel() {
        TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
        crit.setMaximumTunnelsRequired(1);
        crit.setMinimumTunnelsRequired(1);
        crit.setAnonymityPriority(50); // arbitrary
        crit.setLatencyPriority(50); // arbitrary
        crit.setReliabilityPriority(50); // arbitrary
        List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);
        TunnelId id = null;
        if (tunnelIds.size() > 0)
            id = (TunnelId)tunnelIds.get(0);
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Outbound tunnel selected: " + id);
        return id;
    }

    /**
     * Select an inbound tunnel to receive replies and acks from the participant
     *
     * @return gateway for the selected reply tunnel, or null if none is usable
     */
    private TunnelGateway selectInboundGateway(TunnelInfo participant) {
        TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
        criteria.setAnonymityPriority(66);
        criteria.setReliabilityPriority(66);
        criteria.setLatencyPriority(33);
        criteria.setMaximumTunnelsRequired(1);
        criteria.setMinimumTunnelsRequired(1);
        List ids = getContext().tunnelManager().selectInboundTunnelIds(criteria);
        if (ids.size() <= 0) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("No inbound tunnels to receive the tunnel create messages. Argh",
                           new Exception("Tunnels suck. whats up?"));
            return null;
        } else {
            // use the first selected id we still have TunnelInfo for
            TunnelInfo gateway = null;
            TunnelId id = null;
            for (int i = 0; i < ids.size(); i++) {
                id = (TunnelId)ids.get(i);
                gateway = getContext().tunnelManager().getTunnelInfo(id);
                if (gateway != null)
                    break;
            }
            if (gateway != null) {
                TunnelGateway gw = new TunnelGateway(id, gateway.getThisHop());
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Inbound tunnel gateway: " + id + " on router " + gateway.getThisHop());
                return gw;
            } else {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("No gateway found?!", new Exception("No gateway"));
                return null;
            }
        }
    }

    /**
     * Build a TunnelCreateMessage to the participant
     */
    private TunnelCreateMessage buildTunnelCreate(TunnelInfo participant, TunnelGateway replyGateway) {
        TunnelCreateMessage msg = new TunnelCreateMessage(getContext());
        msg.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
        msg.setConfigurationKey(participant.getConfigurationKey());
        msg.setIncludeDummyTraffic(participant.getSettings().getIncludeDummy());
        msg.setMaxAvgBytesPerMin(participant.getSettings().getBytesPerMinuteAverage());
        msg.setMaxAvgMessagesPerMin(participant.getSettings().getMessagesPerMinuteAverage());
        msg.setMaxPeakBytesPerMin(participant.getSettings().getBytesPerMinutePeak());
        msg.setMaxPeakMessagesPerMin(participant.getSettings().getMessagesPerMinutePeak());
        msg.setNextRouter(participant.getNextHop());
        // TODO: update the TunnelInfo structure so we can have the tunnel contain
        // different tunnelIds per hop
        msg.setNextTunnelId(participant.getTunnelId());
        if (participant.getNextHop() == null)
            msg.setParticipantType(TunnelCreateMessage.PARTICIPANT_TYPE_ENDPOINT);
        else if (participant.getSigningKey() != null)
            msg.setParticipantType(TunnelCreateMessage.PARTICIPANT_TYPE_GATEWAY);
        else
            msg.setParticipantType(TunnelCreateMessage.PARTICIPANT_TYPE_OTHER);
        msg.setReorderMessages(participant.getSettings().getReorder());

        SessionKey replySessionKey = getContext().keyGenerator().generateSessionKey();
        SessionTag tag = new SessionTag(true);
        Set tags = new HashSet();
        tags.add(tag);
        // make it so we'll read the session tag correctly and use the right session key
        getContext().sessionKeyManager().tagsReceived(replySessionKey, tags);

        msg.setReplyPeer(replyGateway.getGateway());
        msg.setReplyTunnel(replyGateway.getTunnelId());
        msg.setReplyKey(replySessionKey);
        msg.setReplyTag(tag);
        // NOTE(review): a negative duration (already-expired settings) is
        // passed through as-is here - confirm callers guarantee the
        // expiration is in the future
        long duration = participant.getSettings().getExpiration() - getContext().clock().now();
        if (duration == 0) duration = 1;
        msg.setTunnelDurationSeconds(duration/1000);
        msg.setTunnelId(participant.getTunnelId());
        msg.setTunnelKey(participant.getEncryptionKey());
        msg.setVerificationPrivateKey(participant.getSigningKey());
        msg.setVerificationPublicKey(participant.getVerificationKey());
        return msg;
    }

    /**
     * Build a garlic message wrapping the data and status as cloves with both to be routed
     * through the target, where the data is destined.
     *
     */
    private GarlicMessage buildGarlicMessage(I2NPMessage data,
                                             TunnelGateway replyTunnel,
                                             RouterInfo target, SessionKey wrappedKey,
                                             Set wrappedTags, PublicKey wrappedTo) {
        GarlicConfig config = buildGarlicConfig(data, replyTunnel, target);

        PublicKey rcptKey = config.getRecipientPublicKey();
        if (rcptKey == null) {
            if (config.getRecipient() == null) {
                throw new IllegalArgumentException("Null recipient specified");
            } else if (config.getRecipient().getIdentity() == null) {
                throw new IllegalArgumentException("Null recipient.identity specified");
            } else if (config.getRecipient().getIdentity().getPublicKey() == null) {
                throw new IllegalArgumentException("Null recipient.identity.publicKey specified");
            } else
                rcptKey = config.getRecipient().getIdentity().getPublicKey();
        }

        // let the caller know which key the garlic was wrapped to
        if (wrappedTo != null)
            wrappedTo.setData(rcptKey.getData());

        long start = getContext().clock().now();
        GarlicMessage message = GarlicMessageBuilder.buildMessage(getContext(), config, wrappedKey, wrappedTags, 10);
        long end = getContext().clock().now();
        if ( (end - start) > 1000) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Took more than a second (" + (end-start) + "ms) to create the garlic for the tunnel");
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Took LESS than a second (" + (end-start) + "ms) to create the garlic for the tunnel!");
        }
        return message;
    }

    /**
     * Build the garlic config: a single data clove plus delivery instructions
     * routing the garlic to the target router directly.
     */
    private GarlicConfig buildGarlicConfig(I2NPMessage data,
                                           TunnelGateway replyTunnel, RouterInfo target) {
        GarlicConfig config = new GarlicConfig();

        long garlicExpiration = getContext().clock().now() + _timeoutMs;
        PayloadGarlicConfig dataClove = buildDataClove(data, target, garlicExpiration);
        config.addClove(dataClove);

        DeliveryInstructions instructions = new DeliveryInstructions();
        instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER);
        instructions.setDelayRequested(false);
        instructions.setDelaySeconds(0);
        instructions.setEncrypted(false);
        instructions.setEncryptionKey(null);
        instructions.setRouter(target.getIdentity().getHash());
        instructions.setTunnelId(null);

        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Setting the expiration on the garlic config to " + (new Date(garlicExpiration)));

        config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
        config.setDeliveryInstructions(instructions);
        config.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
        config.setExpiration(garlicExpiration);
        config.setRecipientPublicKey(target.getIdentity().getPublicKey());

        return config;
    }

    /**
     * Build a clove that sends the data to the target (which is local)
     */
    private PayloadGarlicConfig buildDataClove(I2NPMessage data, RouterInfo target, long expiration) {
        PayloadGarlicConfig clove = new PayloadGarlicConfig();

        DeliveryInstructions instructions = new DeliveryInstructions();
        instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
        instructions.setRouter(target.getIdentity().getHash());
        instructions.setTunnelId(null);
        instructions.setDelayRequested(false);
        instructions.setDelaySeconds(0);
        instructions.setEncrypted(false);

        clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
        clove.setDeliveryInstructions(instructions);
        clove.setExpiration(expiration);
        clove.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
        clove.setPayload(data);
        clove.setRecipientPublicKey(null);
        clove.setRequestAck(false);

        return clove;
    }

    /**
     * Mark the whole tunnel request as failed (idempotent - subsequent calls
     * after completion only log).  Clears all pending state.
     */
    private void fail() {
        if (_complete) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Build tunnel failed via " + _tunnelGateway.getThisHop().toBase64()
                          + ", but we've already completed, so fuck off: " + _tunnelGateway,
                          new Exception("Fail aborted"));
        } else {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Build tunnel " + _tunnelGateway.getTunnelId().getTunnelId()
                          + " with gateway " + _tunnelGateway.getThisHop().toBase64()
                          + " FAILED: " + _failedTunnelParticipants + " - " + _tunnelGateway,
                          new Exception("Why did we fail building?"));
            synchronized (_toBeRequested) {
                _toBeRequested.clear();
            }
            synchronized (_failedTunnelParticipants) {
                _failedTunnelParticipants.clear();
            }
            _complete = true;
        }
    }

    /**
     * A peer agreed to join.  If it was the last pending one, the tunnel is
     * complete and is handed to the pool; otherwise requeue this job to ask
     * the next peer.
     */
    private void peerSuccess(TunnelInfo peer) {
        int numLeft = 0;
        synchronized (_toBeRequested) {
            numLeft = _toBeRequested.size();
        }

        if (numLeft <= 0) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Peer (" + peer.getThisHop().toBase64()
                          + ") successful: mark the tunnel as completely ready [inbound? "
                          + _isInbound + "]");
            _complete = true;
            if (_isInbound)
                _pool.addFreeTunnel(_tunnelGateway);
            else
                _pool.addOutboundTunnel(_tunnelGateway);
            _tunnelGateway.setIsReady(true);
            getContext().statManager().updateFrequency("tunnel.buildFrequency");
        } else {
            if (_log.shouldLog(Log.DEBUG)) {
                StringBuffer buf = new StringBuffer(128);
                buf.append("Hop to ").append(peer.getThisHop().toBase64());
                buf.append(" successful for tunnel ").append(peer.getTunnelId().getTunnelId());
                buf.append(", but ").append(numLeft).append(" are pending");
                _log.debug(buf.toString());
            }
            getContext().jobQueue().addJob(this);
        }
    }

    /**
     * Called when the job queue drops this job; build replacement fake
     * tunnels so the pools are never left empty.
     */
    public void dropped() {
        _pool.buildFakeTunnels();
        if (_log.shouldLog(Log.WARN))
            _log.warn("Dropping request to create a new tunnel, so we may have manually created a new fake inbound and a new fake outbound, just in case we needed that...");
    }

    /**
     * Reply handler: collects reply messages (delivery status acks and
     * TunnelCreateStatusMessages) and turns them into peerSuccess()/fail()
     * calls.  Messages are queued via setMessage() and drained in runJob().
     */
    private class Success extends JobImpl implements ReplyJob {
        private TunnelInfo _tunnel;
        private List _messages;
        private boolean _successCompleted;
        private SessionKey _wrappedKey;
        private Set _wrappedTags;
        private PublicKey _wrappedTo;
        private TunnelId _replyTunnelId;
        private TunnelId _outboundTunnelId;
        private long _started;

        public Success(RouterContext enclosingContext, TunnelInfo tunnel, SessionKey wrappedKey, Set wrappedTags, PublicKey wrappedTo, TunnelId replyTunnelId, TunnelId outboundTunnelId) {
            super(enclosingContext);
            _tunnel = tunnel;
            _messages = new LinkedList();
            _successCompleted = false;
            _wrappedKey = wrappedKey;
            _wrappedTags = wrappedTags;
            _wrappedTo = wrappedTo;
            _replyTunnelId = replyTunnelId;
            _outboundTunnelId = outboundTunnelId;
            _started = getContext().clock().now();
        }

        public String getName() { return "Create Tunnel Status Received"; }
        public void runJob() {
            // drain the queued replies under the lock, process them outside it
            List toProc = null;
            synchronized (_messages) {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("# messages received for successs: " + _messages.size());
                toProc = new ArrayList(_messages);
                _messages.clear();
            }

            long responseTime = getContext().clock().now() - _started;
            for (Iterator iter = toProc.iterator(); iter.hasNext(); ) {
                I2NPMessage msg = (I2NPMessage)iter.next();
                process(msg, responseTime);
            }
        }

        /** handle a single reply: a delivery ack or the create status itself */
        private void process(I2NPMessage message, long responseTime) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Running success status job (tunnel = " + _tunnel + " msg = " + message + ")");
            if (message.getType() == DeliveryStatusMessage.MESSAGE_TYPE) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Tunnel creation message acknowledged for tunnel " + _tunnel.getTunnelId()
                              + " at router " + _tunnel.getThisHop().toBase64());
            } else {
                TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)message;
                if (_successCompleted) {
                    _log.info("Already completed in the Success task [skipping " + msg.getStatus() + "]");
                    return;
                }
                switch (msg.getStatus()) {
                    case TunnelCreateStatusMessage.STATUS_FAILED_CERTIFICATE:
                    case TunnelCreateStatusMessage.STATUS_FAILED_DELETED:
                    case TunnelCreateStatusMessage.STATUS_FAILED_DUPLICATE_ID:
                    case TunnelCreateStatusMessage.STATUS_FAILED_OVERLOADED:
                        // explicit rejection - record it against the peer's profile
                        if (_log.shouldLog(Log.WARN))
                            _log.warn("Tunnel creation failed for tunnel " + _tunnel.getTunnelId()
                                      + " at router " + _tunnel.getThisHop().toBase64()
                                      + " with status " + msg.getStatus());
                        getContext().profileManager().tunnelRejected(_tunnel.getThisHop(), responseTime, true);
                        Success.this.getContext().messageHistory().tunnelRejected(_tunnel.getThisHop(),
                                                                                  _tunnel.getTunnelId(),
                                                                                  null, "refused");
                        fail();
                        _successCompleted = true;
                        break;
                    case TunnelCreateStatusMessage.STATUS_SUCCESS:
                        if (_log.shouldLog(Log.DEBUG))
                            _log.debug("Tunnel creation succeeded for tunnel " + _tunnel.getTunnelId()
                                       + " at router " + _tunnel.getThisHop().toBase64());

                        // if the garlic wrapped new session tags, remember they arrived
                        if ( (_wrappedKey != null) && (_wrappedKey.getData() != null) &&
                             (_wrappedTags != null) && (_wrappedTags.size() > 0) &&
                             (_wrappedTo != null) ) {
                            Success.this.getContext().sessionKeyManager().tagsDelivered(_wrappedTo, _wrappedKey, _wrappedTags);
                            if (_log.shouldLog(Log.INFO))
                                _log.info("Delivered tags successfully to " + _tunnel.getThisHop().toBase64()
                                          + "! # tags: " + _wrappedTags.size());
                        }

                        _tunnel.setIsReady(true);
                        getContext().profileManager().tunnelJoined(_tunnel.getThisHop(), responseTime);
                        peerSuccess(_tunnel);
                        _successCompleted = true;
                        break;
                }
            }
        }

        public void setMessage(I2NPMessage message) {
            synchronized (_messages) {
                _messages.add(message);
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Reply message " + _messages.size() + " received "
                               + message.getClass().getName(), new Exception("Received from"));
            }
        }
    }

    /**
     * Timeout handler: record the implicit rejection against the peer's
     * profile and fail the whole tunnel request.
     */
    private class Failure extends JobImpl {
        private TunnelInfo _tunnel;
        private TunnelId _outboundTunnelId;
        private TunnelId _replyTunnelId;
        private long _started;

        public Failure(RouterContext enclosingContext, TunnelInfo tunnel, TunnelId replyTunnelId, TunnelId outboundTunnelId) {
            super(enclosingContext);
            _tunnel = tunnel;
            _replyTunnelId = replyTunnelId;
            _outboundTunnelId = outboundTunnelId;
            _started = getContext().clock().now();
        }

        public String getName() { return "Create Tunnel Failed"; }
        public void runJob() {
            // update the tunnel so its known to be not working
            if (_log.shouldLog(Log.WARN)) {
                _log.warn("Tunnel creation timed out for tunnel " + _tunnel.getTunnelId() + " at router "
                          + _tunnel.getThisHop().toBase64() + " from router "
                          + getContext().routerHash().toBase64() + " after waiting "
                          + (getContext().clock().now()-_started) + "ms");
                _log.warn("Added by", Failure.this.getAddedBy());
            }
            synchronized (_failedTunnelParticipants) {
                _failedTunnelParticipants.add(_tunnel.getThisHop());
            }
            Failure.this.getContext().messageHistory().tunnelRequestTimedOut(_tunnel.getThisHop(), _tunnel.getTunnelId());
            long responseTime = getContext().clock().now() - _started;
            // perhaps not an explicit reject, but an implicit one (due to dropped messages, tunnel failure, etc)
            getContext().profileManager().tunnelRejected(_tunnel.getThisHop(), responseTime, false);
            getContext().profileManager().messageFailed(_tunnel.getThisHop());
            // one (or both) of the tunnels used to send the request / receive
            // a reply failed, or the peer failed, or the peer's tunnels failed
            //_pool.tunnelFailed(_replyTunnelId);
            //_pool.tunnelFailed(_outboundTunnelId);
            Failure.this.getContext().statManager().updateFrequency("tunnel.buildFailFrequency");
            fail();
        }
    }

    /**
     * Matches the TunnelCreateStatusMessage coming back from the specific
     * participant for the specific tunnel id being requested.
     */
    private class Selector implements MessageSelector {
        private TunnelInfo _tunnel;
        private boolean _statusFound;
        private long _attemptExpiration;

        public Selector(TunnelInfo tunnel) {
            _tunnel = tunnel;
            _statusFound = false;
            _attemptExpiration = getContext().clock().now() + _timeoutMs;
        }

        public boolean continueMatching() {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("ContinueMatching looking for tunnel " + _tunnel.getTunnelId().getTunnelId()
                           + " from " + _tunnel.getThisHop().toBase64() + ": found? " + _statusFound);
            return !_statusFound;
        }
        public long getExpiration() { return _attemptExpiration; }
        public boolean isMatch(I2NPMessage message) {
            if (message.getType() == TunnelCreateStatusMessage.MESSAGE_TYPE) {
                TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)message;
                if (_tunnel.getThisHop().equals(msg.getFromHash())) {
                    if (_tunnel.getTunnelId().equals(msg.getTunnelId())) {
                        if (_log.shouldLog(Log.DEBUG))
                            _log.debug("Matches the tunnel create status message");
                        _statusFound = true;
                        return true;
                    } else {
                        // hmm another tunnel through the peer...
                        if (_log.shouldLog(Log.DEBUG))
                            _log.debug("Status message from peer [" + msg.getFromHash().toBase64()
                                       + "], with wrong tunnelId [" + msg.getTunnelId()
                                       + "] not [" + _tunnel.getTunnelId().getTunnelId() + "]");
                        return false;
                    }
                } else {
                    // status message but from the wrong peer
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("Status message from the wrong peer ["
                                   + msg.getFromHash().toBase64() + "], not ["
                                   + _tunnel.getThisHop().toBase64() + "]");
                    return false;
                }
            } else {
                //_log.debug("Message " + message.getClass().getName()
                //           + " is not a delivery status or tunnel create status message [waiting for ok for tunnel "
                //           + _tunnel.getTunnelId() + " so we can fire " + _onCreated + "]");
                return false;
            }
        }

        public String toString() {
            return "Build Tunnel Job Selector for tunnel " + _tunnel.getTunnelId().getTunnelId()
                   + " at " + _tunnel.getThisHop().toBase64() + " [found=" + _statusFound + "] (@"
                   + (new Date(getExpiration())) + ")";
        }
    }
}

View File

@ -1,388 +0,0 @@
package net.i2p.router.tunnelmanager;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.util.Date;
import java.util.List;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageSelector;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.stat.RateStat;
import net.i2p.stat.Rate;
import net.i2p.util.Log;
class TestTunnelJob extends JobImpl {
private Log _log;
/** tunnel that we want to test */
private TunnelId _primaryId;
/** tunnel that is used to help test the primary id */
private TunnelId _secondaryId;
private TunnelPool _pool;
private long _nonce;
/**
 * @param ctx router context
 * @param id tunnel to be tested (the "primary" tunnel)
 * @param pool pool the tunnel belongs to
 */
public TestTunnelJob(RouterContext ctx, TunnelId id, TunnelPool pool) {
    super(ctx);
    _log = ctx.logManager().getLog(TestTunnelJob.class);
    _primaryId = id;
    _pool = pool;
    // random nonce used as the DeliveryStatusMessage id so the reply can be matched
    _nonce = ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE);
}
public String getName() { return "Test Tunnel"; }
/**
 * Look up the tunnel under test, stamp its last-tested time, and dispatch
 * to the outbound or inbound test depending on whether we are its gateway.
 */
public void runJob() {
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Testing tunnel " + _primaryId.getTunnelId());
    TunnelInfo tunnel = _pool.getTunnelInfo(_primaryId);
    if (tunnel == null) {
        _log.error("wtf, why are we testing a tunnel that we do not know about? ["
                   + _primaryId.getTunnelId() + "]", getAddedBy());
        return;
    }
    // note when we last kicked off a test of this tunnel
    tunnel.setLastTested(getContext().clock().now());
    if (isOutbound(tunnel))
        testOutbound(tunnel);
    else
        testInbound(tunnel);
}
/**
 * An outbound tunnel is one whose first hop (gateway) is us.
 *
 * @param info tunnel to check; null logs an error and returns false
 * @return true if this router is the gateway of the given tunnel
 */
private boolean isOutbound(TunnelInfo info) {
    if (info == null) {
        _log.error("wtf, null info?", new Exception("Who checked a null tunnel info?"));
        return false;
    }
    // collapse the redundant if/else { return true; / return false; }
    // into a single boolean expression
    return getContext().routerHash().equals(info.getThisHop());
}
private final static long DEFAULT_TEST_TIMEOUT = 10*1000; // 10 seconds for a test to succeed
private final static long DEFAULT_MINIMUM_TEST_TIMEOUT = 10*1000; // 10 second min
private final static long MAXIMUM_TEST_TIMEOUT = 60*1000; // 60 second max
private final static int TEST_PRIORITY = 100;
/**
 * How long should we let tunnel tests go on for?
 *
 * Starts from DEFAULT_TEST_TIMEOUT, is replaced by a multiple of the recent
 * average test success time once enough samples exist, is backed off
 * exponentially while tunnels are failing, and is finally clamped between
 * getMinimumTestTimeout() and MAXIMUM_TEST_TIMEOUT.
 */
private long getTunnelTestTimeout() {
    long rv = DEFAULT_TEST_TIMEOUT;
    RateStat rs = getContext().statManager().getRate("tunnel.testSuccessTime");
    if (rs != null) {
        Rate r = rs.getRate(10*60*1000);
        if (r != null) {
            // only trust the stat once we have a reasonable sample size;
            // fall back to the lifetime average when the current period is empty
            if (r.getLifetimeEventCount() > 10) {
                if (r.getLastEventCount() <= 0)
                    rv = (long)(r.getLifetimeAverageValue() * getTunnelTestDeviationLimit());
                else
                    rv = (long)(r.getAverageValue() * getTunnelTestDeviationLimit());
            }
        }
    }

    // lets back off if we're failing
    rs = getContext().statManager().getRate("tunnel.failAfterTime");
    if (rs != null) {
        Rate r = rs.getRate(5*60*1000);
        if (r != null) {
            long failures = r.getLastEventCount() + r.getCurrentEventCount();
            if (failures > 0) {
                // double the timeout per recent failure (clamped below)
                rv <<= failures;
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Tunnels are failing (" + failures + "), so set the timeout to " + rv);
            }
        }
    }

    // clamp into [minimum, MAXIMUM_TEST_TIMEOUT]
    if (rv > MAXIMUM_TEST_TIMEOUT) {
        rv = MAXIMUM_TEST_TIMEOUT;
    } else {
        long min = getMinimumTestTimeout();
        if (rv < min)
            rv = min;
    }
    return rv;
}
/**
 * Multiplier applied to the average tunnel test time to derive the
 * acceptable timeout (configurable via "router.tunnelTestDeviation",
 * defaulting to 2.0 when unset or unparseable).
 */
private double getTunnelTestDeviationLimit() {
    String prop = getContext().getProperty("router.tunnelTestDeviation", "2.0");
    try {
        return Double.parseDouble(prop);
    } catch (NumberFormatException nfe) {
        return 2.0;
    }
}
/**
 * Floor for the tunnel test timeout, read from the
 * "router.tunnelTestMinimum" property. Falls back to
 * DEFAULT_MINIMUM_TEST_TIMEOUT when unset or unparseable.
 */
private long getMinimumTestTimeout() {
    String timeout = getContext().getProperty("router.tunnelTestMinimum", ""+DEFAULT_MINIMUM_TEST_TIMEOUT);
    if (timeout == null)
        return DEFAULT_MINIMUM_TEST_TIMEOUT;
    try {
        return Long.parseLong(timeout);
    } catch (NumberFormatException nfe) {
        return DEFAULT_MINIMUM_TEST_TIMEOUT;
    }
}
/**
 * Send a message out the tunnel with instructions to send the message back
 * to ourselves (through a different inbound tunnel) and wait for it to arrive.
 *
 * Picks the reply tunnel into _secondaryId; if no other inbound tunnel is
 * available the test is immediately failed.
 */
private void testOutbound(TunnelInfo info) {
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Testing outbound tunnel " + info);
    DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
    msg.setArrival(new Date(getContext().clock().now()));
    msg.setMessageId(_nonce);
    Hash us = getContext().routerHash();
    _secondaryId = getReplyTunnel();
    if (_secondaryId == null) {
        getContext().jobQueue().addJob(new TestFailedJob(getContext()));
        return;
    }
    TunnelInfo inboundInfo = _pool.getTunnelInfo(_secondaryId);
    // the reply tunnel may have been dropped from the pool between selection
    // and lookup - guard against an NPE and let the test proceed (it will
    // simply time out if the tunnel really is gone)
    if (inboundInfo != null)
        inboundInfo.setLastTested(getContext().clock().now());
    long timeout = getTunnelTestTimeout();
    TestFailedJob failureJob = new TestFailedJob(getContext());
    MessageSelector selector = new TestMessageSelector(msg.getMessageId(), info.getTunnelId().getTunnelId(), timeout);
    SendTunnelMessageJob testJob = new SendTunnelMessageJob(getContext(), msg, info.getTunnelId(), us, _secondaryId, null, new TestSuccessfulJob(getContext(), timeout), failureJob, selector, timeout, TEST_PRIORITY);
    getContext().jobQueue().addJob(testJob);
}
/**
 * Send a message (through a separate outbound tunnel) to the inbound
 * tunnel's gateway and wait for it to arrive back at us.
 *
 * Picks the outbound tunnel into _secondaryId; if no other outbound tunnel
 * is available the test is immediately failed.
 */
private void testInbound(TunnelInfo info) {
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Testing inbound tunnel " + info);
    DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
    msg.setArrival(new Date(getContext().clock().now()));
    msg.setMessageId(_nonce);
    _secondaryId = getOutboundTunnel();
    if (_secondaryId == null) {
        getContext().jobQueue().addJob(new TestFailedJob(getContext()));
        return;
    }
    TunnelInfo outboundInfo = _pool.getTunnelInfo(_secondaryId);
    // the outbound tunnel may have been dropped from the pool between
    // selection and lookup - guard against an NPE and let the test proceed
    // (it will simply time out if the tunnel really is gone)
    if (outboundInfo != null)
        outboundInfo.setLastTested(getContext().clock().now());
    long timeout = getTunnelTestTimeout();
    TestFailedJob failureJob = new TestFailedJob(getContext());
    MessageSelector selector = new TestMessageSelector(msg.getMessageId(), info.getTunnelId().getTunnelId(), timeout);
    SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, _secondaryId, info.getThisHop(), info.getTunnelId(), null, new TestSuccessfulJob(getContext(), timeout), failureJob, selector, timeout, TEST_PRIORITY);
    getContext().jobQueue().addJob(j);
}
/**
 * Get the tunnel for replies to be sent down when testing outbound tunnels.
 * The tunnel under test itself is skipped.
 *
 * @return an inbound tunnel id other than _primaryId, or null if none exists
 */
private TunnelId getReplyTunnel() {
    TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
    crit.setMinimumTunnelsRequired(2);
    crit.setMaximumTunnelsRequired(2);
    // arbitrary priorities
    crit.setAnonymityPriority(50);
    crit.setLatencyPriority(50);
    crit.setReliabilityPriority(50);
    List candidates = getContext().tunnelManager().selectInboundTunnelIds(crit);
    for (int i = 0; i < candidates.size(); i++) {
        TunnelId cur = (TunnelId)candidates.get(i);
        if (!cur.equals(_primaryId))
            return cur;
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Not testing a tunnel with itself [duh]");
    }
    _log.error("Unable to test tunnel " + _primaryId + ", since there are NO OTHER INBOUND TUNNELS to receive the ack through");
    return null;
}
/**
 * Get the tunnel to send the message out when testing inbound tunnels.
 * The tunnel under test itself is skipped.
 *
 * @return an outbound tunnel id other than _primaryId, or null if none exists
 */
private TunnelId getOutboundTunnel() {
    TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
    crit.setMinimumTunnelsRequired(2);
    crit.setMaximumTunnelsRequired(2);
    // arbitrary priorities
    crit.setAnonymityPriority(50);
    crit.setLatencyPriority(50);
    crit.setReliabilityPriority(50);
    List candidates = getContext().tunnelManager().selectOutboundTunnelIds(crit);
    for (int i = 0; i < candidates.size(); i++) {
        TunnelId cur = (TunnelId)candidates.get(i);
        if (!cur.equals(_primaryId))
            return cur;
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Not testing a tunnel with itself [duh]");
    }
    _log.error("Unable to test tunnel " + _primaryId + ", since there are NO OTHER OUTBOUND TUNNELS to send the ack through");
    return null;
}
/**
 * Fired when the test ack is not received in time: marks the primary
 * tunnel as failed (the secondary failure is only logged, not acted on).
 */
private class TestFailedJob extends JobImpl {
    public TestFailedJob(RouterContext enclosingContext) {
        super(enclosingContext);
    }
    public String getName() { return "Tunnel Test Failed"; }
    public void runJob() {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Test of tunnel " + _primaryId.getTunnelId()
                      + " failed while waiting for nonce " + _nonce + ": "
                      + _pool.getTunnelInfo(_primaryId), getAddedBy());
        _pool.tunnelFailed(_primaryId);
        if (_secondaryId == null)
            return;
        if (_log.shouldLog(Log.WARN))
            _log.warn("Secondary test of tunnel " + _secondaryId.getTunnelId()
                      + " failed while waiting for nonce " + _nonce + ": "
                      + _pool.getTunnelInfo(_secondaryId), getAddedBy());
        //_pool.tunnelFailed(_secondaryId);
    }
}
/**
 * Fired when the test ack arrives: credits both the tested tunnel and the
 * reply tunnel (and every remote peer in each) and records the round-trip
 * time, unless the reply came in after the timeout (in which case the
 * failure job has already handled it).
 */
private class TestSuccessfulJob extends JobImpl implements ReplyJob {
    private DeliveryStatusMessage _msg;
    private long _timeout;
    public TestSuccessfulJob(RouterContext enclosingContext, long timeout) {
        super(enclosingContext);
        _msg = null;
        _timeout = timeout;
    }
    public String getName() { return "Tunnel Test Successful"; }
    public void runJob() {
        long time = getContext().clock().now() - _msg.getArrival().getTime();
        if (_log.shouldLog(Log.INFO))
            _log.info("Test of tunnel " + _primaryId+ " successfull after "
                      + time + "ms waiting for " + _nonce);
        if (time > _timeout)
            return; // the test failed job should already have run
        creditTunnel(_primaryId, time);
        creditTunnel(_secondaryId, time);
        getContext().statManager().addRateData("tunnel.testSuccessTime", time, time);
    }
    /** mark the given tunnel as valid and credit every remote hop in it */
    private void creditTunnel(TunnelId id, long time) {
        TunnelInfo info = _pool.getTunnelInfo(id);
        if (info == null)
            return;
        TestTunnelJob.this.getContext().messageHistory().tunnelValid(info, time);
        // walk the hop chain, crediting each peer other than ourselves
        for (TunnelInfo cur = info; cur != null; cur = cur.getNextHopInfo()) {
            Hash peer = cur.getThisHop();
            if ( (peer != null) && (!getContext().routerHash().equals(peer)) )
                getContext().profileManager().tunnelTestSucceeded(peer, time);
        }
    }
    public void setMessage(I2NPMessage message) {
        _msg = (DeliveryStatusMessage)message;
    }
}
/**
 * Matches the DeliveryStatusMessage carrying our test nonce, giving up
 * after the configured timeout.
 */
private class TestMessageSelector implements MessageSelector {
    private long _id;        // nonce we are waiting for
    private long _tunnelId;  // tunnel under test (logging only)
    private boolean _found;
    private long _expiration;
    public TestMessageSelector(long id, long tunnelId, long timeoutMs) {
        _id = id;
        _tunnelId = tunnelId;
        _found = false;
        _expiration = getContext().clock().now() + timeoutMs;
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("the expiration while testing tunnel " + tunnelId
                       + " waiting for nonce " + id + ": " + new Date(_expiration));
    }
    /** keep matching until the nonce has been seen */
    public boolean continueMatching() {
        if (_log.shouldLog(Log.DEBUG)) {
            if (_found)
                _log.debug("Don't continue matching for tunnel " + _tunnelId + " / " + _id);
            else
                _log.debug("Continue matching while looking for nonce for tunnel " + _tunnelId);
        }
        return !_found;
    }
    public long getExpiration() {
        boolean expired = _expiration < getContext().clock().now();
        if (expired && _log.shouldLog(Log.DEBUG))
            _log.debug("EXPIRED while looking for nonce " + _id + " for tunnel " + _tunnelId);
        return _expiration;
    }
    /** true only for the DeliveryStatusMessage holding our nonce */
    public boolean isMatch(I2NPMessage message) {
        if (!(message instanceof DeliveryStatusMessage))
            return false;
        DeliveryStatusMessage status = (DeliveryStatusMessage)message;
        if (status.getMessageId() != _id) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Found a delivery status message, but it contains nonce "
                           + status.getMessageId() + " and not " + _id);
            return false;
        }
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Found successful test of tunnel " + _tunnelId + " after "
                       + (getContext().clock().now() - status.getArrival().getTime())
                       + "ms waiting for " + _id);
        _found = true;
        return true;
    }
    public String toString() {
        StringBuilder buf = new StringBuilder(256);
        buf.append(super.toString());
        buf.append(": TestMessageSelector: tunnel ").append(_tunnelId);
        buf.append(" looking for ").append(_id).append(" expiring in ");
        buf.append(_expiration - getContext().clock().now());
        buf.append("ms");
        return buf.toString();
    }
}
}

View File

@ -1,404 +0,0 @@
package net.i2p.router.tunnelmanager;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import net.i2p.data.SigningPrivateKey;
import net.i2p.data.SigningPublicKey;
import net.i2p.data.TunnelId;
import net.i2p.router.ClientTunnelSettings;
import net.i2p.router.PeerSelectionCriteria;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelSettings;
import net.i2p.util.Log;
/**
 * Builds TunnelInfo hop chains for inbound and outbound tunnels: selects
 * peers, partitions them into per-tunnel ordered lists, and wires up the
 * per-hop keys and settings. The hop-chain construction is order sensitive,
 * so this class is documented rather than restructured.
 */
class TunnelBuilder {
    private Log _log;
    private RouterContext _context;
    /** lifetime used when the client settings don't specify a duration */
    private final static long DEFAULT_TUNNEL_DURATION = 10*60*1000; // 10 minutes
    /**
     * Chance that the tunnel build will be 0 hop, on a PROBABILITY_LOCAL_SCALE.
     * nextInt(10) yields 0..9, so with -1 a random 0 hop tunnel is never forced.
     */
    private final static int PROBABILITY_LOCAL = -1;
    private final static int PROBABILITY_LOCAL_SCALE = 10;
    public TunnelBuilder(RouterContext context) {
        _context = context;
        _log = context.logManager().getLog(TunnelBuilder.class);
    }
    /** configure a single inbound tunnel for the destination (never forced local) */
    public TunnelInfo configureInboundTunnel(Destination dest, ClientTunnelSettings settings) {
        return configureInboundTunnel(dest, settings, false);
    }
    /**
     * Configure a single inbound tunnel for the destination.
     *
     * @param useFake if true, include no remote peers (0 hop, local only)
     */
    public TunnelInfo configureInboundTunnel(Destination dest, ClientTunnelSettings settings, boolean useFake) {
        boolean randFake = (_context.random().nextInt(PROBABILITY_LOCAL_SCALE) <= PROBABILITY_LOCAL);
        List peerLists = null;
        if (useFake || randFake) {
            peerLists = new ArrayList(0);
        } else {
            // one tunnel's worth of peers at the configured depth
            List peerHashes = selectInboundPeers(1, settings.getDepthInbound());
            peerLists = randomizeLists(peerHashes, 1, settings.getDepthInbound());
        }
        if (peerLists.size() <= 0) {
            // no usable peers: fall back to a 0 hop (local) tunnel
            if (_log.shouldLog(Log.INFO))
                _log.info("Configuring local inbound tunnel");
            return configureInboundTunnel(dest, settings, new ArrayList());
        } else {
            List peerHashList = (List)peerLists.get(0);
            return configureInboundTunnel(dest, settings, peerHashList);
        }
    }
    /** configure a single outbound tunnel (never forced local) */
    public TunnelInfo configureOutboundTunnel(ClientTunnelSettings settings) {
        return configureOutboundTunnel(settings, false);
    }
    /**
     * Configure a single outbound tunnel.
     *
     * @param useFake if true, include no remote peers (0 hop, local only)
     */
    public TunnelInfo configureOutboundTunnel(ClientTunnelSettings settings, boolean useFake) {
        boolean randFake = (_context.random().nextInt(PROBABILITY_LOCAL_SCALE) <= PROBABILITY_LOCAL);
        List peerLists = null;
        if (useFake || randFake) {
            peerLists = new ArrayList(0);
        } else {
            List peerHashes = selectOutboundPeers(1, settings.getDepthOutbound());
            peerLists = randomizeLists(peerHashes, 1, settings.getDepthOutbound());
        }
        if (peerLists.size() <= 0) {
            // no usable peers: fall back to a 0 hop (local) tunnel
            if (_log.shouldLog(Log.INFO))
                _log.info("Configuring local outbound tunnel");
            return configureOutboundTunnel(settings, new ArrayList());
        } else {
            List peerHashList = (List)peerLists.get(0);
            return configureOutboundTunnel(settings, peerHashList);
        }
    }
    /**
     * Select a series of participants for the inbound tunnel, define each of
     * their operating characteristics, and return them as a chain of TunnelInfo
     * structures.  The first TunnelInfo in each chain is the inbound gateway
     * to which the lease should be attached, and the last is the local router.
     *
     * @return set of TunnelInfo structures, where each value is the gateway of
     *         a different tunnel (and these TunnelInfo structures are chained
     *         via getNextHopInfo())
     */
    public Set configureInboundTunnels(Destination dest, ClientTunnelSettings settings) {
        return configureInboundTunnels(dest, settings, false);
    }
    /**
     * @param useFake if true, make this tunnel include no remote peers (so it'll always succeed)
     *
     */
    public Set configureInboundTunnels(Destination dest, ClientTunnelSettings settings, boolean useFake) {
        Set tunnels = new HashSet();
        int numIn = settings.getNumInboundTunnels();
        if (numIn <= 0) {
            // always build at least one inbound tunnel
            if (_log.shouldLog(Log.INFO))
                _log.info("No inbound tunnels requested, but we're creating one anyway");
            numIn = 1;
        }
        List peerLists = null;
        if (!useFake) {
            List peerHashes = selectInboundPeers(numIn, settings.getDepthInbound());
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Peer hashes selected: " + peerHashes.size());
            // NOTE(review): partitions by settings.getNumInboundTunnels(), not the
            // possibly-bumped numIn above - verify this is intended when numIn was 0
            peerLists = randomizeLists(peerHashes, settings.getNumInboundTunnels(), settings.getDepthInbound());
        } else {
            peerLists = new ArrayList(0);
        }
        if (peerLists.size() <= 0) {
            // no peers available (or fake requested): all tunnels are 0 hop
            for (int i = 0; i < numIn; i++) {
                TunnelInfo tunnel = configureInboundTunnel(dest, settings, new ArrayList());
                tunnels.add(tunnel);
                if (_log.shouldLog(Log.INFO))
                    _log.info("Dummy inbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")");
            }
        } else {
            for (Iterator iter = peerLists.iterator(); iter.hasNext();) {
                List peerList = (List)iter.next();
                TunnelInfo tunnel = configureInboundTunnel(dest, settings, peerList);
                tunnels.add(tunnel);
                if (_log.shouldLog(Log.INFO))
                    _log.info("Real inbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")");
            }
        }
        return tunnels;
    }
    /** configure the full set of outbound tunnels (never forced local) */
    public Set configureOutboundTunnels(ClientTunnelSettings settings) {
        return configureOutboundTunnels(settings, false);
    }
    /**
     * @param useFake if true, make this tunnel include no remote peers (so it'll always succeed)
     *
     */
    public Set configureOutboundTunnels(ClientTunnelSettings settings, boolean useFake) {
        Set tunnels = new HashSet();
        List peerLists = null;
        if (!useFake) {
            List peerHashes = selectOutboundPeers(settings.getNumOutboundTunnels(), settings.getDepthOutbound());
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Peer hashes selected: " + peerHashes.size());
            peerLists = randomizeLists(peerHashes, settings.getNumOutboundTunnels(), settings.getDepthOutbound());
        } else {
            peerLists = new ArrayList(0);
        }
        if (peerLists.size() <= 0) {
            // no peers available (or fake requested): all tunnels are 0 hop
            for (int i = 0; i < settings.getNumOutboundTunnels(); i++) {
                TunnelInfo tunnel = configureOutboundTunnel(settings, new ArrayList());
                tunnels.add(tunnel);
                if (_log.shouldLog(Log.INFO))
                    _log.info("Dummy outbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")");
            }
        } else {
            for (Iterator iter = peerLists.iterator(); iter.hasNext();) {
                List peerList = (List)iter.next();
                TunnelInfo tunnel = configureOutboundTunnel(settings, peerList);
                tunnels.add(tunnel);
                if (_log.shouldLog(Log.INFO))
                    _log.info("Real outbound tunnel " + tunnel.getTunnelId() + " configured (" + tunnel + ")");
            }
        }
        return tunnels;
    }
    /** inbound and outbound currently share the same selection strategy */
    private List selectInboundPeers(int numTunnels, int numPerTunnel) {
        return selectPeers(numTunnels, numPerTunnel);
    }
    private List selectOutboundPeers(int numTunnels, int numPerTunnel) {
        return selectPeers(numTunnels, numPerTunnel);
    }
    /**
     * Retrieve a list of Hash structures (from RouterIdentity) for routers that
     * should be used for the tunnels.  A sufficient number should be retrieved so
     * that there are enough for the specified numTunnels where each tunnel has numPerTunnel
     * hops in it.
     *
     */
    private List selectPeers(int numTunnels, int numPerTunnel) {
        PeerSelectionCriteria criteria = new PeerSelectionCriteria();
        int maxNeeded = numTunnels * numPerTunnel;
        int minNeeded = numPerTunnel;
        criteria.setMaximumRequired(maxNeeded);
        criteria.setMinimumRequired(minNeeded);
        criteria.setPurpose(PeerSelectionCriteria.PURPOSE_TUNNEL);
        List peers = _context.peerManager().selectPeers(criteria);
        List rv = new ArrayList(peers.size());
        // only keep peers whose RouterInfo we actually have locally
        for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
            Hash peer = (Hash)iter.next();
            if (null != _context.netDb().lookupRouterInfoLocally(peer))
                rv.add(peer);
            else {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("peer manager selected a peer we don't know about - drop it");
            }
        }
        return rv;
    }
    /**
     * Take the router hashes and organize them into numTunnels lists where each
     * list contains numPerTunnel hashes.
     *
     * @return Set of List of Hash objects, where the each list contains an ordered
     *         group of routers to participate in the tunnel.  Note that these lists
     *         do NOT include the local router at the end, so numPerTunnel = 0 (and
     *         hence, an empty list) is a valid (albeit insecure) length
     */
    private List randomizeLists(List peerHashes, int numTunnels, int numPerTunnel) {
        List tunnels = new ArrayList(numTunnels);
        if (peerHashes.size() == 0) {
            if (_log.shouldLog(Log.INFO))
                _log.info("No peer hashes provided");
            return tunnels;
        } else {
            if (_log.shouldLog(Log.INFO))
                _log.info("# peers randomizing: " + peerHashes + " into " + numTunnels + " tunnels");
        }
        for (int i = 0; i < numTunnels; i++) {
            // walk the peer list from a random start, wrapping around; a peer is
            // never used twice in one tunnel, so with fewer than numPerTunnel
            // distinct peers the resulting tunnel is shorter than requested
            int startOn = _context.random().nextInt(peerHashes.size());
            List peers = new ArrayList();
            for (int j = 0; j < numPerTunnel; j++) {
                int k = (j + startOn) % peerHashes.size();
                Hash peer = (Hash)peerHashes.get(k);
                if (!peers.contains(peer))
                    peers.add(peer);
            }
            if (_log.shouldLog(Log.INFO))
                _log.info("Tunnel " + i + " [" + numPerTunnel + "/(" + startOn+ ")]: " + peers);
            tunnels.add(peers);
        }
        if (_log.shouldLog(Log.INFO))
            _log.info("Tunnels: " + tunnels);
        return tunnels;
    }
    /**
     * Create a chain of TunnelInfo structures with the appropriate settings using
     * the supplied routers for each hop, as well as a final hop ending with the current
     * router
     */
    private TunnelInfo configureInboundTunnel(Destination dest, ClientTunnelSettings settings, List peerHashList) {
        // one encryption key and signing keypair shared by the whole tunnel
        SessionKey encryptionKey = _context.keyGenerator().generateSessionKey();
        Object kp[] = _context.keyGenerator().generateSigningKeypair();
        SigningPublicKey pubkey = (SigningPublicKey)kp[0];
        SigningPrivateKey privkey = (SigningPrivateKey)kp[1];
        long duration = settings.getInboundDuration();
        if (duration <= 0)
            duration = DEFAULT_TUNNEL_DURATION;
        long expiration = _context.clock().now() + duration;
        TunnelSettings tunnelSettings = new TunnelSettings(_context);
        tunnelSettings.setBytesPerMinuteAverage(settings.getBytesPerMinuteInboundAverage());
        tunnelSettings.setBytesPerMinutePeak(settings.getBytesPerMinuteInboundPeak());
        // depth includes the local router as the final hop
        tunnelSettings.setDepth(peerHashList.size()+1);
        tunnelSettings.setExpiration(expiration);
        tunnelSettings.setIncludeDummy(settings.getIncludeDummyInbound());
        tunnelSettings.setMessagesPerMinuteAverage(settings.getMessagesPerMinuteInboundAverage());
        tunnelSettings.setMessagesPerMinutePeak(settings.getMessagesPerMinuteInboundPeak());
        tunnelSettings.setReorder(settings.getReorderInbound());
        TunnelId id = new TunnelId();
        id.setTunnelId(_context.random().nextLong(TunnelId.MAX_ID_VALUE));
        id.setType(TunnelId.TYPE_INBOUND);
        TunnelInfo first = null;
        TunnelInfo prev = null;
        // build the remote hop chain, linking each hop to the next
        for (int i = 0; i < peerHashList.size(); i++) {
            Hash peer = (Hash)peerHashList.get(i);
            TunnelInfo cur = new TunnelInfo(_context);
            cur.setThisHop(peer);
            cur.setConfigurationKey(_context.keyGenerator().generateSessionKey());
            cur.setDestination(null);
            if (i == 0) {
                // gateway (only the gateway holds the tunnel's encryption/signing keys)
                cur.setEncryptionKey(encryptionKey);
                cur.setSigningKey(privkey);
            }
            cur.setSettings(tunnelSettings);
            cur.setTunnelId(id);
            cur.setVerificationKey(pubkey);
            if (prev != null) {
                prev.setNextHop(peer);
                prev.setNextHopInfo(cur);
                prev.setNextHopId(cur.getTunnelId());
            } else {
                first = cur;
            }
            prev = cur;
        }
        // the local router is always the final hop of an inbound tunnel
        TunnelInfo last = new TunnelInfo(_context);
        last.setThisHop(_context.routerHash());
        last.setDestination(dest);
        last.setEncryptionKey(encryptionKey);
        last.setSettings(tunnelSettings);
        last.setTunnelId(id);
        last.setVerificationKey(pubkey);
        last.setSigningKey(privkey);
        last.setConfigurationKey(_context.keyGenerator().generateSessionKey());
        TunnelInfo cur = first;
        if (cur == null) {
            // 0 hop tunnel: we are both gateway and endpoint
            first = last;
        } else {
            // append the local hop to the end of the chain
            while (cur.getNextHopInfo() != null)
                cur = cur.getNextHopInfo();
            cur.setNextHop(last.getThisHop());
            cur.setNextHopInfo(last);
            cur.setNextHopId(last.getTunnelId());
        }
        return first;
    }
    /**
     * Create a chain of TunnelInfo structures with the appropriate settings using
     * the supplied routers for each hop, starting with the current router
     */
    private TunnelInfo configureOutboundTunnel(ClientTunnelSettings settings, List peerHashList) {
        SessionKey encryptionKey = _context.keyGenerator().generateSessionKey();
        Object kp[] = _context.keyGenerator().generateSigningKeypair();
        SigningPublicKey pubkey = (SigningPublicKey)kp[0];
        SigningPrivateKey privkey = (SigningPrivateKey)kp[1];
        long duration = settings.getInboundDuration(); // uses inbound duration for symmetry
        if (duration <= 0)
            duration = DEFAULT_TUNNEL_DURATION;
        long expiration = _context.clock().now() + duration;
        TunnelSettings tunnelSettings = new TunnelSettings(_context);
        // NOTE(review): the inbound byte/message/reorder settings are used here
        // too, presumably for the same symmetry as the duration - verify intended
        tunnelSettings.setBytesPerMinuteAverage(settings.getBytesPerMinuteInboundAverage());
        tunnelSettings.setBytesPerMinutePeak(settings.getBytesPerMinuteInboundPeak());
        tunnelSettings.setDepth(peerHashList.size()+1);
        tunnelSettings.setExpiration(expiration);
        tunnelSettings.setIncludeDummy(settings.getIncludeDummyInbound());
        tunnelSettings.setMessagesPerMinuteAverage(settings.getMessagesPerMinuteInboundAverage());
        tunnelSettings.setMessagesPerMinutePeak(settings.getMessagesPerMinuteInboundPeak());
        tunnelSettings.setReorder(settings.getReorderInbound());
        TunnelId id = new TunnelId();
        id.setTunnelId(_context.random().nextLong(TunnelId.MAX_ID_VALUE));
        id.setType(TunnelId.TYPE_OUTBOUND);
        // the local router is always the gateway of an outbound tunnel and
        // holds the tunnel's encryption/signing keys
        TunnelInfo first = new TunnelInfo(_context);
        first.setThisHop(_context.routerHash());
        first.setDestination(null);
        first.setEncryptionKey(encryptionKey);
        first.setSettings(tunnelSettings);
        first.setTunnelId(id);
        first.setVerificationKey(pubkey);
        first.setSigningKey(privkey);
        first.setConfigurationKey(_context.keyGenerator().generateSessionKey());
        TunnelInfo prev = first;
        for (int i = 0; i < peerHashList.size(); i++) {
            Hash peer = (Hash)peerHashList.get(i);
            TunnelInfo cur = new TunnelInfo(_context);
            cur.setThisHop(peer);
            cur.setConfigurationKey(_context.keyGenerator().generateSessionKey());
            cur.setDestination(null);
            if (i == peerHashList.size() -1) {
                // endpoint (the last remote hop also gets the encryption key)
                cur.setEncryptionKey(encryptionKey);
            }
            cur.setSettings(tunnelSettings);
            cur.setTunnelId(id);
            cur.setVerificationKey(pubkey);
            prev.setNextHop(peer);
            prev.setNextHopInfo(cur);
            prev.setNextHopId(cur.getTunnelId());
            prev = cur;
        }
        return first;
    }
}

View File

@ -1,28 +0,0 @@
package net.i2p.router.tunnelmanager;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelCreateMessage;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;
/**
 * Builds the job that processes an inbound TunnelCreateMessage.
 */
class TunnelCreateMessageHandler implements HandlerJobBuilder {
    private RouterContext _context;
    public TunnelCreateMessageHandler(RouterContext context) {
        _context = context;
    }
    /** wrap the received create request in a HandleTunnelCreateMessageJob */
    public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
        TunnelCreateMessage msg = (TunnelCreateMessage)receivedMessage;
        return new HandleTunnelCreateMessageJob(_context, msg, from, fromHash);
    }
}

View File

@ -1,23 +0,0 @@
package net.i2p.router.tunnelmanager;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
/**
 * Immutable pairing of a tunnel id with the router acting as that tunnel's
 * gateway.
 */
class TunnelGateway {
    // final: this is a pure value object, so make it immutable
    private final TunnelId _tunnel;
    private final Hash _gateway;
    public TunnelGateway(TunnelId id, Hash gateway) {
        _tunnel = id;
        _gateway = gateway;
    }
    /** which tunnel is this the gateway for? */
    public TunnelId getTunnelId() { return _tunnel; }
    /** router at which the tunnel begins */
    public Hash getGateway() { return _gateway; }
}

View File

@ -1,794 +0,0 @@
package net.i2p.router.tunnelmanager;
import java.io.IOException;
import java.io.Writer;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.router.ClientTunnelSettings;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
* Store the data for free inbound, outbound, and client pooled tunnels, and serve
* as the central coordination point
*
*/
class TunnelPool {
private Log _log;
private RouterContext _context;
/** TunnelId --> TunnelInfo of outbound tunnels */
private Map _outboundTunnels;
/** TunnelId --> TunnelInfo of free inbound tunnels */
private Map _freeInboundTunnels;
/** Destination --> ClientTunnelPool */
private Map _clientPools;
/** TunnelId --> TunnelInfo structures of non-local tunnels we're participating in */
private Map _participatingTunnels;
/** TunnelId --> TunnelInfo of tunnels being built (but not ready yet) */
private Map _pendingTunnels;
/** defines pool settings: # inbound / outbound, length, etc */
private ClientTunnelSettings _poolSettings;
/** reads/writes the pool state to disk (see WRITE_POOL_DELAY) */
private TunnelPoolPersistenceHelper _persistenceHelper;
/** how long will each tunnel create take? */
private long _tunnelCreationTimeout;
/** how many clients should we stock the pool in support of */
private int _targetClients;
/** active or has it been shutdown?  (the maps above are not initialized in
    the constructor - presumably done at startup elsewhere; verify) */
private boolean _isLive;
private TunnelBuilder _tunnelBuilder;
/** write out the current state every 60 seconds */
private final static long WRITE_POOL_DELAY = 60*1000;
/** allow the tunnel create timeout to be overridden, default is 60 seconds [but really slow computers should be larger] */
public final static String TUNNEL_CREATION_TIMEOUT_PARAM = "tunnel.creationTimeoutMs";
public final static long TUNNEL_CREATION_TIMEOUT_DEFAULT = 60*1000;
public final static String TARGET_CLIENTS_PARAM = "router.targetClients";
public final static int TARGET_CLIENTS_DEFAULT = 3;
/**
 * Create the pool, registering its stats with the stat manager.
 * Fix: the outbound/participating stat descriptions were copy-pasted from
 * the inbound ones and all said "inbound tunnel".
 */
public TunnelPool(RouterContext ctx) {
    _context = ctx;
    _log = ctx.logManager().getLog(TunnelPool.class);
    _context.statManager().createFrequencyStat("tunnel.failFrequency", "How often do tunnels prematurely fail (after being successfully built)?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("tunnel.failAfterTime", "How long do tunnels that fail prematurely last before failing?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("tunnel.inboundMessagesProcessed", "How many messages does an inbound tunnel process in its lifetime?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("tunnel.outboundMessagesProcessed", "How many messages does an outbound tunnel process in its lifetime?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("tunnel.participatingMessagesProcessed", "How many messages does a participating tunnel process in its lifetime?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("tunnel.participatingMessagesProcessedActive", "How many messages beyond the average were processed in a more-than-average tunnel's lifetime?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("tunnel.participatingBytesProcessed", "How many bytes does a participating tunnel process in its lifetime?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("tunnel.participatingBytesProcessedActive", "How many bytes beyond the average were processed in a more-than-average tunnel's lifetime?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _isLive = true;
    _persistenceHelper = new TunnelPoolPersistenceHelper(_context);
    _tunnelBuilder = new TunnelBuilder(_context);
}
/**
 * If the tunnel is known in any way, fetch it, else return null.
 *
 * Each candidate map is checked under its own monitor, in a deliberate
 * order (participating, outbound, free inbound, pending, client pools);
 * when the id's type is known only the matching maps are consulted
 * (pending is always checked).
 */
public TunnelInfo getTunnelInfo(TunnelId id) {
    if (!_isLive) {
        if (_log.shouldLog(Log.ERROR))
            _log.error(toString() + ": Not live, unable to search for tunnel " + id);
        return null;
    }
    if (id == null) {
        if (_log.shouldLog(Log.ERROR))
            _log.error(toString() + ": Id requested is null", new Exception("wtf, why do you want a null?"));
        return null;
    }
    // an unspecified type means we have to look everywhere
    boolean typeKnown = id.getType() != TunnelId.TYPE_UNSPECIFIED;
    if ( (!typeKnown) || (id.getType() == TunnelId.TYPE_PARTICIPANT) ) {
        synchronized (_participatingTunnels) {
            if (_participatingTunnels.containsKey(id)) {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug(toString() + ": Found tunnel " + id + " as a participant");
                return (TunnelInfo)_participatingTunnels.get(id);
            }
        }
    }
    if ( (!typeKnown) || (id.getType() == TunnelId.TYPE_OUTBOUND) ) {
        synchronized (_outboundTunnels) {
            if (_outboundTunnels.containsKey(id)) {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug(toString() + ": Found tunnel " + id + " as outbound");
                return (TunnelInfo)_outboundTunnels.get(id);
            }
        }
    }
    if ( (!typeKnown) || (id.getType() == TunnelId.TYPE_INBOUND) ) {
        synchronized (_freeInboundTunnels) {
            if (_freeInboundTunnels.containsKey(id)) {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug(toString() + ": Found tunnel " + id + " as a free inbound");
                return (TunnelInfo)_freeInboundTunnels.get(id);
            }
        }
    }
    // pending tunnels are checked regardless of the id's declared type
    synchronized (_pendingTunnels) {
        if (_pendingTunnels.containsKey(id)) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug(toString() + ": Found tunnel " + id + " as a pending tunnel");
            return (TunnelInfo)_pendingTunnels.get(id);
        }
    }
    if ( (!typeKnown) || (id.getType() == TunnelId.TYPE_INBOUND) ) {
        // inbound tunnels already allocated to a client live in its pool,
        // either active or inactive
        synchronized (_clientPools) {
            for (Iterator iter = _clientPools.values().iterator(); iter.hasNext(); ) {
                ClientTunnelPool pool = (ClientTunnelPool)iter.next();
                if (pool.isInboundTunnel(id)) {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug(toString() + ": Found tunnel " + id + " as an inbound tunnel for the client " + pool.getDestination().calculateHash().toBase64());
                    return pool.getInboundTunnel(id);
                } else if (pool.isInactiveInboundTunnel(id)) {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug(toString() + ": Found tunnel " + id + " as an inactive inbound tunnel for the client " + pool.getDestination().calculateHash().toBase64());
                    return pool.getInactiveInboundTunnel(id);
                }
            }
        }
    }
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(toString() + ": Did NOT find the tunnel " + id);
    return null;
}
/**
 * Get the tunnelId of all tunnels we are managing (not ones we are merely
 * participating in): outbound, free inbound, and client-allocated inbound.
 */
public Set getManagedTunnelIds() {
    if (!_isLive)
        return Collections.EMPTY_SET;
    Set ids = new HashSet(64);
    synchronized (_outboundTunnels) {
        ids.addAll(_outboundTunnels.keySet());
    }
    synchronized (_freeInboundTunnels) {
        ids.addAll(_freeInboundTunnels.keySet());
    }
    // inbound tunnels already allocated to clients are managed by us too
    synchronized (_clientPools) {
        Iterator iter = _clientPools.values().iterator();
        while (iter.hasNext()) {
            ClientTunnelPool pool = (ClientTunnelPool)iter.next();
            ids.addAll(pool.getInboundTunnelIds());
        }
    }
    return ids;
}
/**
 * Allocate a free tunnel for use by the destination
 *
 * @return true if the tunnel was allocated successfully, false if an error occurred
 */
public boolean allocateTunnel(TunnelId id, Destination dest) {
    ClientTunnelPool pool = getClientPool(dest);
    return allocateTunnel(id, pool);
}
public boolean allocateTunnel(TunnelId id, ClientTunnelPool pool) {
if (!_isLive) return false;
if (pool == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error allocating tunnel " + id + " to " + pool.getDestination() + ": no pool for the client known");
return false;
}
TunnelInfo tunnel = removeFreeTunnel(id);
if (tunnel == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error allocating tunnel " + id + " to " + pool.getDestination() + ": tunnel is no longer free?");
return false;
}
TunnelInfo t = tunnel;
while (t != null) {
t.setDestination(pool.getDestination());
t = t.getNextHopInfo();
}
pool.addInboundTunnel(tunnel);
return true;
}
/**
 * Set of tunnelIds for outbound tunnels
 *
 * @return a copy of the key set (safe to iterate without locking), or null if not live
 */
public Set getOutboundTunnels() {
    if (!_isLive) return null;
    synchronized (_outboundTunnels) {
        // copy so callers can iterate without holding the lock
        return new HashSet(_outboundTunnels.keySet());
    }
}
/** number of outbound tunnels currently managed (0 if the pool is not live) */
public int getOutboundTunnelCount() {
    if (!_isLive) return 0;
    synchronized (_outboundTunnels) {
        return _outboundTunnels.size();
    }
}
/** fetch the info for the given outbound tunnel, or null if unknown / pool not live */
public TunnelInfo getOutboundTunnel(TunnelId id) {
    if (!_isLive) return null;
    synchronized (_outboundTunnels) {
        return (TunnelInfo)_outboundTunnels.get(id);
    }
}
/**
 * Begin managing the given tunnel as one of our outbound tunnels, removing it
 * from the pending set if it was there.
 */
public void addOutboundTunnel(TunnelInfo tunnel) {
    if (!_isLive) return;
    if (_log.shouldLog(Log.DEBUG)) _log.debug(toString() + ": Add outbound tunnel " + tunnel.getTunnelId());
    _context.messageHistory().tunnelJoined("outbound", tunnel);
    synchronized (_outboundTunnels) {
        _outboundTunnels.put(tunnel.getTunnelId(), tunnel);
    }
    synchronized (_pendingTunnels) {
        _pendingTunnels.remove(tunnel.getTunnelId());
    }
}
/**
 * Stop managing the given outbound tunnel, recording its usage stats.  If that
 * was our last outbound tunnel, synchronously build failsafe replacements.
 */
public void removeOutboundTunnel(TunnelId id) {
    if (!_isLive) return;
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(toString() + ": Removing outbound tunnel " + id);
    TunnelInfo removed;
    int left;
    synchronized (_outboundTunnels) {
        removed = (TunnelInfo)_outboundTunnels.remove(id);
        left = _outboundTunnels.size();
    }
    addTunnelStats(id, removed);
    if (left <= 0)
        buildFakeTunnels();
}
/**
 * Set of tunnelIds that this router has available for consumption
 *
 * @return a copy of the key set (safe to iterate without locking), or null if not live
 */
public Set getFreeTunnels() {
    if (!_isLive) return null;
    synchronized (_freeInboundTunnels) {
        // copy so callers can iterate without holding the lock
        return new HashSet(_freeInboundTunnels.keySet());
    }
}
/** number of free inbound tunnels currently pooled (0 if the pool is not live) */
public int getFreeTunnelCount() {
    if (!_isLive) return 0;
    synchronized (_freeInboundTunnels) {
        return _freeInboundTunnels.size();
    }
}
/** fetch the info for the given free inbound tunnel, or null if unknown / pool not live */
public TunnelInfo getFreeTunnel(TunnelId id) {
    if (!_isLive) return null;
    synchronized (_freeInboundTunnels) {
        return (TunnelInfo)_freeInboundTunnels.get(id);
    }
}
/**
 * Add an inbound tunnel to the free pool (removing it from pending).  If the
 * tunnel was built for a specific destination, it is immediately allocated to
 * that destination's client pool rather than left free.
 */
public void addFreeTunnel(TunnelInfo tunnel) {
    if (!_isLive) return;
    if (_log.shouldLog(Log.DEBUG)) _log.debug(toString() + ": Add free inbound tunnel " + tunnel.getTunnelId());
    _context.messageHistory().tunnelJoined("free inbound", tunnel);
    synchronized (_freeInboundTunnels) {
        _freeInboundTunnels.put(tunnel.getTunnelId(), tunnel);
    }
    synchronized (_pendingTunnels) {
        _pendingTunnels.remove(tunnel.getTunnelId());
    }
    if (tunnel.getDestination() != null) {
        // this one was custom built, so tack 'er on directly
        allocateTunnel(tunnel.getTunnelId(), tunnel.getDestination());
    }
}
/**
 * Pull an inbound tunnel out of the free pool, recording its usage stats.  If
 * that emptied the pool, synchronously build failsafe replacements.
 *
 * @return the removed tunnel's info, or null if it wasn't free / pool not live
 */
public TunnelInfo removeFreeTunnel(TunnelId id) {
    if (!_isLive) return null;
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(toString() + ": Removing free inbound tunnel " + id);
    TunnelInfo removed;
    int left;
    synchronized (_freeInboundTunnels) {
        removed = (TunnelInfo)_freeInboundTunnels.remove(id);
        left = _freeInboundTunnels.size();
    }
    addTunnelStats(id, removed);
    if (left <= 0)
        buildFakeTunnels();
    return removed;
}
/**
 * set of tunnelIds that this router is participating in (but not managing)
 *
 * @return a copy of the key set (safe to iterate without locking), or null if not live
 */
public Set getParticipatingTunnels() {
    if (!_isLive) return null;
    synchronized (_participatingTunnels) {
        // copy so callers can iterate without holding the lock
        return new HashSet(_participatingTunnels.keySet());
    }
}
/** number of tunnels we participate in but don't manage (0 if the pool is not live) */
public int getParticipatingTunnelCount() {
    if (!_isLive) return 0;
    synchronized (_participatingTunnels) {
        return _participatingTunnels.size();
    }
}
/** fetch the info for the given participating tunnel, or null if unknown / pool not live */
public TunnelInfo getParticipatingTunnel(TunnelId id) {
    if (!_isLive) return null;
    synchronized (_participatingTunnels) {
        return (TunnelInfo)_participatingTunnels.get(id);
    }
}
/**
 * Begin participating in the given tunnel, marking it ready for use.
 *
 * @return true if added, false if we were already participating in it (or pool not live)
 */
public boolean addParticipatingTunnel(TunnelInfo tunnel) {
    if (!_isLive) return false;
    if (_log.shouldLog(Log.DEBUG)) _log.debug(toString() + ": Add participating tunnel " + tunnel.getTunnelId());
    _context.messageHistory().tunnelJoined("participant", tunnel);
    synchronized (_participatingTunnels) {
        if (_participatingTunnels.containsKey(tunnel.getTunnelId())) {
            return false;
        } else {
            _participatingTunnels.put(tunnel.getTunnelId(), tunnel);
            tunnel.setIsReady(true);
            return true;
        }
    }
}
/**
 * Stop participating in the given tunnel, recording its usage stats.
 *
 * @return the removed tunnel's info, or null if unknown / pool not live
 */
public TunnelInfo removeParticipatingTunnel(TunnelId id) {
    if (!_isLive) return null;
    if (_log.shouldLog(Log.DEBUG))
        _log.debug(toString() + ": Removing participating tunnel " + id);
    TunnelInfo removed;
    synchronized (_participatingTunnels) {
        removed = (TunnelInfo)_participatingTunnels.remove(id);
    }
    addTunnelStats(id, removed);
    return removed;
}
/**
 * Set of Destinations for clients currently being managed
 *
 * @return a copy of the key set (safe to iterate without locking), or null if not live
 */
public Set getClientPools() {
    if (!_isLive) return null;
    synchronized (_clientPools) {
        // copy so callers can iterate without holding the lock
        return new HashSet(_clientPools.keySet());
    }
}
/**
 * Create and start up a client pool for the destination
 *
 * If a pool already exists for the destination it is reused (and restarted)
 * rather than replaced.  startPool() is invoked outside the _clientPools lock.
 */
public void createClientPool(Destination dest, ClientTunnelSettings settings) {
    if (!_isLive) return;
    ClientTunnelPool pool = null;
    synchronized (_clientPools) {
        if (_clientPools.containsKey(dest)) {
            pool = (ClientTunnelPool)_clientPools.get(dest);
            if (_log.shouldLog(Log.INFO))
                _log.info("Reusing an existing client tunnel pool for " + dest.calculateHash());
        } else {
            pool = new ClientTunnelPool(_context, dest, settings, this);
            if (_log.shouldLog(Log.INFO))
                _log.info("New client tunnel pool created for " + dest.calculateHash());
            _clientPools.put(dest, pool);
        }
    }
    pool.startPool();
}
/**
 * Register an already constructed client pool (package private - used when
 * restoring state), returning whatever pool it displaced for the same
 * destination, if any.
 */
ClientTunnelPool addClientPool(ClientTunnelPool pool) {
    if (!_isLive) return null;
    if (_log.shouldLog(Log.INFO))
        _log.info("Client tunnel pool added for " + pool.getDestination().calculateHash());
    ClientTunnelPool previous;
    synchronized (_clientPools) {
        previous = (ClientTunnelPool)_clientPools.put(pool.getDestination(), pool);
    }
    return previous;
}
/** fetch the pool serving the given destination, or null if unknown / pool not live */
public ClientTunnelPool getClientPool(Destination dest) {
    if (!_isLive) return null;
    synchronized (_clientPools) {
        return (ClientTunnelPool)_clientPools.get(dest);
    }
}
/**
 * Stop managing tunnels for the given destination, stopping its pool (outside
 * the _clientPools lock) if one existed.
 */
public void removeClientPool(Destination dest) {
    if (!_isLive) return;
    if (_log.shouldLog(Log.DEBUG)) _log.debug("Removing client tunnel pool for " + dest.calculateHash());
    ClientTunnelPool pool = null;
    synchronized (_clientPools) {
        pool = (ClientTunnelPool)_clientPools.remove(dest);
    }
    if (pool != null)
        pool.stopPool();
}
/**
 * Set of tunnelIds that are still in the process of being built
 *
 * @return a copy of the key set (safe to iterate without locking), or null if not live
 */
public Set getPendingTunnels() {
    if (!_isLive) return null;
    synchronized (_pendingTunnels) {
        return new HashSet(_pendingTunnels.keySet());
    }
}
/** fetch the info for the given pending tunnel, or null if unknown / pool not live */
public TunnelInfo getPendingTunnel(TunnelId id) {
    if (!_isLive) return null;
    synchronized (_pendingTunnels) {
        return (TunnelInfo)_pendingTunnels.get(id);
    }
}
/** track a tunnel whose build has been requested but not yet completed */
public void addPendingTunnel(TunnelInfo info) {
    if (!_isLive) return;
    _context.messageHistory().tunnelJoined("pending", info);
    synchronized (_pendingTunnels) {
        _pendingTunnels.put(info.getTunnelId(), info);
    }
}
/** drop a pending tunnel, recording its usage stats */
public void removePendingTunnel(TunnelId id) {
    if (!_isLive) return;
    if (_log.shouldLog(Log.DEBUG)) _log.debug(toString() + ": Removing pending tunnel " + id);
    TunnelInfo info = null;
    synchronized (_pendingTunnels) {
        info = (TunnelInfo)_pendingTunnels.remove(id);
    }
    addTunnelStats(id, info);
}
/** fetch the settings for the pool (tunnel settings and quantities) */
public ClientTunnelSettings getPoolSettings() { return _poolSettings; }
public void setPoolSettings(ClientTunnelSettings settings) { _poolSettings = settings; }
/** how many clients the router should expect to handle at once (so it can build sufficient tunnels) */
public int getTargetClients() { return _targetClients; }
public void setTargetClients(int numConcurrentClients) { _targetClients = numConcurrentClients; }
/** max time for any tunnel creation to take (in milliseconds) */
public long getTunnelCreationTimeout() { return _tunnelCreationTimeout; }
public void setTunnelCreationTimeout(long timeout) { _tunnelCreationTimeout = timeout; }
/**
 * determine the number of hops in the longest tunnel we have
 * (only free inbound tunnels are considered)
 */
public int getLongestTunnelLength() {
    int longest = 0;
    synchronized (_freeInboundTunnels) {
        Iterator tunnels = _freeInboundTunnels.values().iterator();
        while (tunnels.hasNext()) {
            TunnelInfo cur = (TunnelInfo)tunnels.next();
            int hops = cur.getLength();
            if (hops > longest)
                longest = hops;
        }
    }
    return longest;
}
/**
 * Failsafe: build a pair of 0-hop tunnels - one inbound and one outbound -
 * whenever we are running low on valid tunnels.  This method blocks until
 * those tunnels are built, and does not make use of the JobQueue.
 *
 */
public void buildFakeTunnels() {
    buildFakeTunnels(false);
}
/**
 * @param force if true, build the failsafe tunnels even if we already have
 *              enough valid ones (otherwise only build when below 3 of a kind)
 */
public void buildFakeTunnels(boolean force) {
    if (force || getFreeValidTunnelCount() < 3) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Running low on valid inbound tunnels, building another");
        // runJob() invoked inline - this blocks the caller until the tunnel is built
        TunnelInfo inTunnelGateway = _tunnelBuilder.configureInboundTunnel(null, getPoolSettings(), true);
        RequestTunnelJob inReqJob = new RequestTunnelJob(_context, this, inTunnelGateway, true, getTunnelCreationTimeout());
        inReqJob.runJob();
    }
    if (force || getOutboundValidTunnelCount() < 3) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Running low on valid outbound tunnels, building another");
        TunnelInfo outTunnelGateway = _tunnelBuilder.configureOutboundTunnel(getPoolSettings(), true);
        RequestTunnelJob outReqJob = new RequestTunnelJob(_context, this, outTunnelGateway, false, getTunnelCreationTimeout());
        outReqJob.runJob();
    }
}
/**
 * How many free inbound tunnels are ready, unallocated, and not yet expired.
 * Tunnels without settings are skipped (the rest of this class guards
 * getSettings() against null; the old code here would have thrown an NPE).
 */
private int getFreeValidTunnelCount() {
    int found = 0;
    Set ids = getFreeTunnels();
    long mustExpireAfter = _context.clock().now();
    for (Iterator iter = ids.iterator(); iter.hasNext(); ) {
        TunnelId id = (TunnelId)iter.next();
        TunnelInfo info = getFreeTunnel(id);
        if ( (info != null) && (info.getIsReady()) && (info.getSettings() != null) ) {
            if (info.getSettings().getExpiration() > mustExpireAfter) {
                // only count tunnels not already allocated to a destination
                if (info.getDestination() == null) {
                    found++;
                }
            }
        }
    }
    return found;
}
/**
 * How many outbound tunnels are ready and not yet expired.  Tunnels without
 * settings are skipped (the rest of this class guards getSettings() against
 * null; the old code here would have thrown an NPE).
 */
private int getOutboundValidTunnelCount() {
    int found = 0;
    Set ids = getOutboundTunnels();
    long mustExpireAfter = _context.clock().now();
    for (Iterator iter = ids.iterator(); iter.hasNext(); ) {
        TunnelId id = (TunnelId)iter.next();
        TunnelInfo info = getOutboundTunnel(id);
        if ( (info != null) && (info.getIsReady()) && (info.getSettings() != null) ) {
            if (info.getSettings().getExpiration() > mustExpireAfter) {
                found++;
            }
        }
    }
    return found;
}
/** by default, tolerate no failures before dropping a tunnel */
private static final int MAX_FAILURES_PER_TUNNEL = 0;
public static final String PROP_MAX_TUNNEL_FAILURES = "tunnel.maxTunnelFailures";
/**
 * How many failures a tunnel may accrue before being treated as dead,
 * read from the router property (falling back to the compiled default
 * when unset or unparsable).
 */
private int getMaxTunnelFailures() {
    String max = _context.getProperty(PROP_MAX_TUNNEL_FAILURES);
    if (max == null)
        return MAX_FAILURES_PER_TUNNEL;
    try {
        return Integer.parseInt(max);
    } catch (NumberFormatException nfe) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Max tunnel failures property is invalid [" + max + "]");
        return MAX_FAILURES_PER_TUNNEL;
    }
}
/**
 * Note that the given tunnel failed.  If the accumulated failure count exceeds
 * the configured tolerance, mark the tunnel as not ready, penalize the profile
 * of every peer in it (other than ourselves), record stats, and make sure we
 * still have failsafe tunnels available.
 */
public void tunnelFailed(TunnelId id) {
    if (!_isLive) return;
    TunnelInfo info = getTunnelInfo(id);
    if (info == null)
        return;
    int failures = info.incrementFailures();
    if (failures <= getMaxTunnelFailures()) {
        if (_log.shouldLog(Log.INFO))
            _log.info("Tunnel " + id + " failure " + failures + ", but not fatal yet");
        return;
    }
    // fatal: the tunnel has exceeded its failure tolerance
    if (_log.shouldLog(Log.WARN))
        _log.warn("Tunnel " + id + " marked as not ready, since it /failed/: " + info.toString(), new Exception("Failed tunnel"));
    _context.messageHistory().tunnelFailed(info.getTunnelId());
    info.setIsReady(false);
    Hash us = _context.routerHash();
    long lifetime = _context.clock().now() - info.getCreated();
    // walk the hop chain, penalizing each remote peer's profile
    while (info != null) {
        if (!info.getThisHop().equals(us)) {
            _context.profileManager().tunnelFailed(info.getThisHop());
        }
        info = info.getNextHopInfo();
    }
    _context.statManager().addRateData("tunnel.failAfterTime", lifetime, lifetime);
    _context.statManager().updateFrequency("tunnel.failFrequency");
    buildFakeTunnels();
}
/**
 * Bring the pool online: allocate the tunnel maps, load any persisted state,
 * read config (creation timeout and target client count), build the failsafe
 * tunnels synchronously, and kick off the recurring manager/expiration jobs.
 */
public void startup() {
    if (_log.shouldLog(Log.INFO)) _log.info("Starting up tunnel pool");
    _isLive = true;
    _outboundTunnels = new HashMap(16);
    _freeInboundTunnels = new HashMap(16);
    _clientPools = new HashMap(8);
    _participatingTunnels = new HashMap(64);
    _pendingTunnels = new HashMap(8);
    _poolSettings = createPoolSettings();
    _persistenceHelper.loadPool(this);
    _tunnelCreationTimeout = -1;
    try {
        // Throwable catch covers both a missing setting (NPE) and a bad number
        String str = _context.router().getConfigSetting(TUNNEL_CREATION_TIMEOUT_PARAM);
        _tunnelCreationTimeout = Long.parseLong(str);
    } catch (Throwable t) {
        _tunnelCreationTimeout = TUNNEL_CREATION_TIMEOUT_DEFAULT;
    }
    _targetClients = TARGET_CLIENTS_DEFAULT;
    try {
        String str = _context.router().getConfigSetting(TARGET_CLIENTS_PARAM);
        _targetClients = Integer.parseInt(str);
    } catch (Throwable t) {
        _targetClients = TARGET_CLIENTS_DEFAULT;
    }
    buildFakeTunnels();
    //_context.jobQueue().addJob(new WritePoolJob());
    _context.jobQueue().addJob(new TunnelPoolManagerJob(_context, this));
    _context.jobQueue().addJob(new TunnelPoolExpirationJob(_context, this));
}
/**
 * Re-read the configurable parameters (tunnel creation timeout and target
 * client count) from the router config, keeping all existing tunnels.
 */
public void restart() {
    long timeout = TUNNEL_CREATION_TIMEOUT_DEFAULT;
    try {
        String str = _context.router().getConfigSetting(TUNNEL_CREATION_TIMEOUT_PARAM);
        timeout = Long.parseLong(str);
    } catch (Throwable t) {
        timeout = TUNNEL_CREATION_TIMEOUT_DEFAULT;
    }
    _tunnelCreationTimeout = timeout;
    int clients = TARGET_CLIENTS_DEFAULT;
    try {
        String str = _context.router().getConfigSetting(TARGET_CLIENTS_PARAM);
        clients = Integer.parseInt(str);
    } catch (Throwable t) {
        clients = TARGET_CLIENTS_DEFAULT;
    }
    _targetClients = clients;
}
/**
 * Take the pool offline, releasing all tunnel maps and settings.  The
 * recurring sub-jobs check isLive() on each run and stop rescheduling.
 */
public void shutdown() {
    if (_log.shouldLog(Log.INFO)) _log.info("Shutting down tunnel pool");
    //if (_persistenceHelper != null)
    //    _persistenceHelper.writePool(this);
    _isLive = false; // the subjobs [should] check getIsLive() on each run
    _outboundTunnels = null;
    _freeInboundTunnels = null;
    _clientPools = null;
    _participatingTunnels = null;
    // bugfix: _pendingTunnels was never released here even though startup()
    // allocates it, leaking any pending TunnelInfo after shutdown
    _pendingTunnels = null;
    _poolSettings = null;
    _persistenceHelper = null;
    _tunnelCreationTimeout = -1;
}
/** whether the pool is currently active (startup() called and shutdown() not yet) */
public boolean isLive() { return _isLive; }
/**
 * Record throughput stats for a tunnel being dropped, keyed by the tunnel's
 * type.  For participating tunnels, traffic above the recent 10-minute average
 * is additionally tracked as "active" load.
 *
 * @param id the tunnel's id (its getType() selects which stats to update)
 * @param info the tunnel's info (no-op if null)
 */
void addTunnelStats(TunnelId id, TunnelInfo info) {
    if ( (info != null) && (id != null) ) {
        switch (id.getType()) {
            case TunnelId.TYPE_INBOUND:
                _context.statManager().addRateData("tunnel.inboundMessagesProcessed",
                                                   info.getMessagesProcessed(),
                                                   info.getSettings().getExpiration() -
                                                   info.getSettings().getCreated());
                break;
            case TunnelId.TYPE_OUTBOUND:
                _context.statManager().addRateData("tunnel.outboundMessagesProcessed",
                                                   info.getMessagesProcessed(),
                                                   info.getSettings().getExpiration() -
                                                   info.getSettings().getCreated());
                break;
            case TunnelId.TYPE_PARTICIPANT:
                long numMsgs = info.getMessagesProcessed();
                // compare against the trailing 10 minute average to isolate "active" traffic
                long lastAvg = (long)_context.statManager().getRate("tunnel.participatingMessagesProcessed").getRate(10*60*1000l).getAverageValue();
                _context.statManager().addRateData("tunnel.participatingMessagesProcessed",
                                                   numMsgs,
                                                   info.getSettings().getExpiration() -
                                                   info.getSettings().getCreated());
                if (numMsgs > lastAvg)
                    _context.statManager().addRateData("tunnel.participatingMessagesProcessedActive",
                                                       numMsgs-lastAvg,
                                                       info.getSettings().getExpiration() -
                                                       info.getSettings().getCreated());
                long numBytes = info.getBytesProcessed();
                lastAvg = (long)_context.statManager().getRate("tunnel.participatingBytesProcessed").getRate(10*60*1000l).getAverageValue();
                _context.statManager().addRateData("tunnel.participatingBytesProcessed", numBytes, numMsgs);
                if (numBytes > lastAvg)
                    _context.statManager().addRateData("tunnel.participatingBytesProcessedActive", numBytes-lastAvg, numMsgs);
                break;
            case TunnelId.TYPE_UNSPECIFIED:
            default:
                break;
        }
    }
}
/** Build the pool's settings from the router's current configuration. */
private ClientTunnelSettings createPoolSettings() {
    ClientTunnelSettings cfg = new ClientTunnelSettings();
    cfg.readFromProperties(_context.router().getConfigMap());
    return cfg;
}
/**
 * Write an HTML summary of the pool (free inbound, outbound, participating,
 * and per-client inbound tunnels) to the given writer.  No-op if the pool is
 * not live.
 *
 * @throws IOException if the writer fails
 */
public void renderStatusHTML(Writer out) throws IOException {
    if (!_isLive) return;
    out.write("<h2>Tunnel Pool</h2>\n");
    // one shared buffer, reused by each render call
    StringBuffer buf = new StringBuffer(4096);
    renderTunnels(out, buf, "Free inbound tunnels", getFreeTunnels());
    renderTunnels(out, buf, "Outbound tunnels", getOutboundTunnels());
    renderTunnels(out, buf, "Participating tunnels", getParticipatingTunnels());
    for (Iterator iter = getClientPools().iterator(); iter.hasNext(); ) {
        Destination dest = (Destination)iter.next();
        ClientTunnelPool pool = getClientPool(dest);
        renderTunnels(out, buf, "Inbound tunnels for " + dest.calculateHash() + " - (still connected? " + (!pool.isStopped()) + ")", pool.getInboundTunnelIds());
    }
    out.flush();
}
/**
 * Render one section: a header with the tunnel count followed by a list entry
 * per tunnel id.  The shared buffer is flushed to the writer and cleared.
 */
private void renderTunnels(Writer out, StringBuffer buf, String msg, Set tunnelIds) throws IOException {
    buf.append("<b>").append(msg).append(":</b> <i>(").append(tunnelIds.size()).append(" tunnels)</i><ul>\n");
    out.write(buf.toString());
    buf.setLength(0);
    Iterator iter = tunnelIds.iterator();
    while (iter.hasNext()) {
        TunnelId cur = (TunnelId)iter.next();
        renderTunnel(out, buf, cur, getTunnelInfo(cur));
    }
    out.write("</ul>\n");
}
/**
 * Render a single tunnel as an HTML list entry: style, readiness, message
 * count, last-test time, destination, expiration, and the hop chain (or just
 * the next hop if the full chain isn't known locally).
 *
 * @param tunnel may be null, in which case only "is not known" is emitted
 */
private final void renderTunnel(Writer out, StringBuffer buf, TunnelId id, TunnelInfo tunnel) throws IOException {
    buf.setLength(0);
    if (tunnel == null) {
        buf.append("<li>Tunnel: ").append(id.getTunnelId()).append(" is not known</li>\n");
    } else {
        buf.append("<li>Tunnel: ").append(tunnel.getTunnelId()).append("</li><pre>");
        buf.append("\n\tStyle: ").append(getStyle(id));
        buf.append("\n\tReady? ").append(tunnel.getIsReady());
        buf.append("\n\tMessages processed: ").append(tunnel.getMessagesProcessed());
        long timeSinceTest = _context.clock().now() - tunnel.getLastTested();
        // tests older than an hour are reported as "never"
        if (timeSinceTest < 60*60*1000)
            buf.append("\n\tLast tested: ").append(timeSinceTest/1000).append(" seconds ago");
        else
            buf.append("\n\tLast tested: never");
        buf.append("\n\tDest? ").append(getDestination(tunnel));
        if (tunnel.getSettings() != null)
            buf.append("\n\tExpiration: ").append(new Date(tunnel.getSettings().getExpiration()));
        else
            buf.append("\n\tExpiration: none");
        buf.append("\n\tStart router: ").append(tunnel.getThisHop().toBase64()).append("\n");
        TunnelInfo t = tunnel.getNextHopInfo();
        if (t != null) {
            // we know the whole chain - list every hop
            int hop = 1;
            while (t != null) {
                buf.append("\tHop ").append(hop).append(": ").append(t.getThisHop().toBase64()).append("\n");
                t = t.getNextHopInfo();
                hop++;
            }
        } else {
            // only the next hop is known
            if (tunnel.getNextHop() != null)
                buf.append("\tNext: ").append(tunnel.getNextHop().toBase64()).append("\n");
        }
        buf.append("\n</pre>");
    }
    out.write(buf.toString());
    buf.setLength(0);
}
/** human readable name for the tunnel's type */
private final static String getStyle(TunnelId id) {
    int type = id.getType();
    if (type == TunnelId.TYPE_INBOUND)
        return "Inbound";
    if (type == TunnelId.TYPE_OUTBOUND)
        return "Outbound";
    if (type == TunnelId.TYPE_PARTICIPANT)
        return "Participant";
    if (type == TunnelId.TYPE_UNSPECIFIED)
        return "Unspecified";
    return "Other! - " + type;
}
/**
 * Walk the hop chain looking for an allocated destination, returning its hash
 * as a string, or "none" if no hop is allocated to a destination.
 */
private final static String getDestination(TunnelInfo info) {
    for (TunnelInfo cur = info; cur != null; cur = cur.getNextHopInfo()) {
        if (cur.getDestination() != null)
            return cur.getDestination().calculateHash().toString();
    }
    return "none";
}
/**
 * Periodically persist the tunnel pool state to disk via the persistence
 * helper.  (Currently not scheduled - see the commented-out addJob in
 * startup().)
 */
private class WritePoolJob extends JobImpl {
    public WritePoolJob() {
        super(TunnelPool.this._context);
        getTiming().setStartAfter(TunnelPool.this._context.clock().now() + WRITE_POOL_DELAY);
    }
    public String getName() { return "Write Out Tunnel Pool"; }
    public void runJob() {
        // stop rescheduling once the pool shuts down
        if (!isLive())
            return;
        _persistenceHelper.writePool(TunnelPool.this);
        requeue(WRITE_POOL_DELAY);
    }
}
}

View File

@ -1,146 +0,0 @@
package net.i2p.router.tunnelmanager;
import java.util.Date;
import java.util.Iterator;
import net.i2p.data.TunnelId;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
 * Periodically go through all of the tunnels not assigned to a client and mark
 * them as no longer ready and/or drop them (as appropriate)
 *
 */
class TunnelPoolExpirationJob extends JobImpl {
    private Log _log;
    private TunnelPool _pool;
    /** expire tunnels as necessary every 30 seconds */
    private final static long EXPIRE_POOL_DELAY = 30*1000;
    /**
     * don't hard expire a tunnel until its later than expiration + buffer
     */
    private final static long EXPIRE_BUFFER = 30*1000;
    public TunnelPoolExpirationJob(RouterContext ctx, TunnelPool pool) {
        super(ctx);
        _log = ctx.logManager().getLog(TunnelPoolExpirationJob.class);
        _pool = pool;
        // wait one full delay before the first pass
        getTiming().setStartAfter(getContext().clock().now() + EXPIRE_POOL_DELAY);
    }
    public String getName() { return "Expire Pooled Tunnels"; }
    public void runJob() {
        // stop rescheduling once the pool shuts down
        if (!_pool.isLive())
            return;
        expireFree();
        expireOutbound();
        expireParticipants();
        expirePending();
        requeue(EXPIRE_POOL_DELAY);
    }
    /**
     * Drop all pooled free tunnels that are expired or are close enough to
     * being expired that allocating them to a client would suck.
     *
     */
    public void expireFree() {
        long now = getContext().clock().now();
        // hard-drop threshold: expiration plus the buffer and clock fudge
        long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR;
        for (Iterator iter = _pool.getFreeTunnels().iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getFreeTunnel(id);
            if ( (info != null) && (info.getSettings() != null) ) {
                if (info.getSettings().getExpiration() < expire) {
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Expiring free inbound tunnel " + id + " ["
                                  + new Date(info.getSettings().getExpiration())
                                  + "] (expire = " + new Date(expire) + ")");
                    _pool.removeFreeTunnel(id);
                } else if (info.getSettings().getExpiration() < now) {
                    // past expiration but within the buffer: soft-disable only
                    if (_log.shouldLog(Log.INFO))
                        _log.info("It is past the expiration for free inbound tunnel " + id
                                  + " but not yet the buffer, mark it as no longer ready");
                    info.setIsReady(false);
                }
            }
        }
    }
    /**
     * Drop all pooled outbound tunnels that are expired
     *
     */
    public void expireOutbound() {
        long now = getContext().clock().now();
        long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR;
        for (Iterator iter = _pool.getOutboundTunnels().iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getOutboundTunnel(id);
            if ( (info != null) && (info.getSettings() != null) ) {
                if (info.getSettings().getExpiration() < expire) {
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Expiring outbound tunnel " + id + " ["
                                  + new Date(info.getSettings().getExpiration()) + "]");
                    _pool.removeOutboundTunnel(id);
                } else if (info.getSettings().getExpiration() < now) {
                    // past expiration but within the buffer: soft-disable only
                    if (_log.shouldLog(Log.INFO))
                        _log.info("It is past the expiration for outbound tunnel " + id
                                  + " but not yet the buffer, mark it as no longer ready");
                    info.setIsReady(false);
                }
            }
        }
    }
    /**
     * Drop all tunnels we are participating in (but not managing) that are expired
     *
     */
    public void expireParticipants() {
        long now = getContext().clock().now();
        long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR;
        for (Iterator iter = _pool.getParticipatingTunnels().iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getParticipatingTunnel(id);
            if ( (info != null) && (info.getSettings() != null) ) {
                if (info.getSettings().getExpiration() < expire) {
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Expiring participation in tunnel " + id + " ["
                                  + new Date(info.getSettings().getExpiration()) + "]");
                    _pool.removeParticipatingTunnel(id);
                }
            }
        }
    }
    /**
     * Drop all tunnels that were in the process of being built, but expired before being handled
     *
     */
    public void expirePending() {
        long now = getContext().clock().now();
        long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR;
        for (Iterator iter = _pool.getPendingTunnels().iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getPendingTunnel(id);
            if ( (info != null) && (info.getSettings() != null) ) {
                if (info.getSettings().getExpiration() < expire) {
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Expiring pending tunnel " + id + " ["
                                  + new Date(info.getSettings().getExpiration()) + "]");
                    _pool.removePendingTunnel(id);
                }
            }
        }
    }
}

View File

@ -1,195 +0,0 @@
package net.i2p.router.tunnelmanager;
import java.util.Iterator;
import java.util.Set;
import net.i2p.data.TunnelId;
import net.i2p.router.ClientTunnelSettings;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
* Request new tunnels to be created if insufficient free inbound tunnels or
* valid outbound tunnels exist.
*
*/
class TunnelPoolManagerJob extends JobImpl {
private Log _log;
private TunnelPool _pool;
/**
* How frequently to check the pool (and fire appropriate refill jobs)
*
*/
private final static long POOL_CHECK_DELAY = 30*1000;
/**
* treat tunnels that are going to expire in the next minute as pretty much
* expired (for the purpose of building new ones)
*/
private final static long EXPIRE_FUDGE_PERIOD = 60*1000;
public TunnelPoolManagerJob(RouterContext ctx, TunnelPool pool) {
super(ctx);
_log = ctx.logManager().getLog(TunnelPoolManagerJob.class);
_pool = pool;
}
public String getName() { return "Manage Tunnel Pool"; }
public void runJob() {
try {
if (!_pool.isLive())
return;
boolean built = false;
ClientTunnelSettings settings = new ClientTunnelSettings();
settings.readFromProperties(getContext().router().getConfigMap());
_pool.setPoolSettings(settings);
try {
String str = getContext().router().getConfigSetting(TunnelPool.TARGET_CLIENTS_PARAM);
int clients = Integer.parseInt(str);
_pool.setTargetClients(clients);
} catch (NumberFormatException nfe) {
// ignore
}
int targetClients = _pool.getTargetClients();
int targetInboundTunnels = targetClients*_pool.getPoolSettings().getNumInboundTunnels() + 1;
int targetOutboundTunnels = targetClients*_pool.getPoolSettings().getNumOutboundTunnels() + 1;
int curFreeInboundTunnels = getFreeTunnelCount();
if (curFreeInboundTunnels < targetInboundTunnels) {
if (_log.shouldLog(Log.INFO))
_log.info("Insufficient free inbound tunnels (" + curFreeInboundTunnels + ", not "
+ targetInboundTunnels + "), requesting more");
requestInboundTunnels(2);
//requestFakeInboundTunnels(1);
built = true;
} else {
// 10% chance of building a new tunnel
if (getContext().random().nextInt(10) > 0) {
// all good, no need for more inbound tunnels
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sufficient inbound tunnels (" + curFreeInboundTunnels + ")");
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Building another inbound tunnel, cuz tunnels r k00l");
requestInboundTunnels(1);
built = true;
}
}
int curOutboundTunnels = getOutboundTunnelCount();
if (curOutboundTunnels < targetOutboundTunnels) {
if (_log.shouldLog(Log.INFO))
_log.info("Insufficient outbound tunnels (" + curOutboundTunnels + ", not "
+ targetOutboundTunnels + "), requesting more");
requestOutboundTunnels(2);
//requestFakeOutboundTunnels(1);
built = true;
} else {
// 10% chance of building a new tunnel
if (getContext().random().nextInt(10) > 0) {
// all good, no need for more outbound tunnels
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sufficient outbound tunnels (" + curOutboundTunnels + ")");
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Building another outbound tunnel, since gravity still works");
requestOutboundTunnels(1);
built = true;
}
}
_pool.buildFakeTunnels();
} catch (Throwable t) {
_log.log(Log.CRIT, "Unhandled exception managing the tunnel pool", t);
}
requeue(POOL_CHECK_DELAY);
}
/**
* How many free inbound tunnels are available for use (safely)
*
*/
private int getFreeTunnelCount() {
Set freeTunnels = _pool.getFreeTunnels();
int free = 0;
int tooShort = 0;
int minLength = _pool.getPoolSettings().getDepthInbound();
long mustExpireAfter = getContext().clock().now() + EXPIRE_FUDGE_PERIOD;
for (Iterator iter = freeTunnels.iterator(); iter.hasNext(); ) {
TunnelId id = (TunnelId)iter.next();
TunnelInfo info = _pool.getFreeTunnel(id);
if ( (info != null) && (info.getIsReady()) ) {
if (info.getSettings().getExpiration() > mustExpireAfter) {
if (info.getLength() >= minLength) {
if (info.getDestination() == null) {
free++;
} else {
// already alloc'ed
_log.error("Why is a free inbound tunnel allocated to a destination? ["
+ info.getTunnelId().getTunnelId() + " to "
+ info.getDestination().toBase64() + "]");
}
} else {
// its valid, sure, but its not long enough *cough*
// for the moment we'll keep these around so that we can use them
// for tunnel management and db messages, rather than force all
// tunnels to be the 2+ hop length as required for clients
tooShort++; // free++;
}
} else {
_log.info("Inbound tunnel " + id + " is expiring in the upcoming period, consider it not-free");
}
}
}
if (free <= 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("No free tunnels that are long enough, but there are " + tooShort + " shorter ones");
return tooShort;
} else {
return free;
}
}
/**
* How many outbound tunnels are available for use (safely)
*
*/
private int getOutboundTunnelCount() {
Set outboundTunnels = _pool.getOutboundTunnels();
int outbound = 0;
long mustExpireAfter = getContext().clock().now() + EXPIRE_FUDGE_PERIOD;
for (Iterator iter = outboundTunnels.iterator(); iter.hasNext(); ) {
TunnelId id = (TunnelId)iter.next();
TunnelInfo info = _pool.getOutboundTunnel(id);
if ( (info != null) && (info.getIsReady()) ) {
if (info.getSettings().getExpiration() > mustExpireAfter) {
outbound++;
} else {
_log.info("Outbound tunnel " + id + " is expiring in the upcoming period, consider it not-free");
}
}
}
return outbound;
}
private void requestInboundTunnels(int numTunnelsToRequest) {
_log.info("Requesting " + numTunnelsToRequest + " inbound tunnels");
for (int i = 0; i < numTunnelsToRequest; i++)
getContext().jobQueue().addJob(new RequestInboundTunnelJob(getContext(), _pool, false));
}
private void requestOutboundTunnels(int numTunnelsToRequest) {
_log.info("Requesting " + numTunnelsToRequest + " outbound tunnels");
for (int i = 0; i < numTunnelsToRequest; i++)
getContext().jobQueue().addJob(new RequestOutboundTunnelJob(getContext(), _pool, false));
}
}

View File

@ -1,207 +0,0 @@
package net.i2p.router.tunnelmanager;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.data.TunnelId;
import net.i2p.router.ClientTunnelSettings;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
* Handle all of the load / store of the tunnel pool (including any contained
* client tunnel pools)
*
*/
class TunnelPoolPersistenceHelper {
private Log _log;
private RouterContext _context;
public final static String PARAM_TUNNEL_POOL_FILE = "router.tunnelPoolFile";
public final static String DEFAULT_TUNNEL_POOL_FILE = "tunnelPool.dat";
public TunnelPoolPersistenceHelper(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(TunnelPoolPersistenceHelper.class);
}
/** Persist the pool to the router's configured tunnel pool file. */
public void writePool(TunnelPool pool) {
    File f = getTunnelPoolFile();
    writePool(pool, f);
}
/**
 * Persist the pool's state to the given file: each tunnel category as a
 * 2-byte count followed by that many serialized TunnelInfo entries, then the
 * client pools.
 *
 * Bugfix: each category is now resolved to concrete TunnelInfo objects
 * *before* the count is written.  The old code wrote the live map size (or
 * one snapshot's size) and then serialized entries from separate lookups,
 * skipping nulls - so a concurrent add/remove could make the stored count
 * disagree with the entries that follow, corrupting the file format.
 */
public void writePool(TunnelPool pool, File f) {
    FileOutputStream fos = null;
    try {
        fos = new FileOutputStream(f);
        List free = new ArrayList();
        for (Iterator iter = pool.getFreeTunnels().iterator(); iter.hasNext(); ) {
            TunnelInfo info = pool.getFreeTunnel((TunnelId)iter.next());
            if (info != null)
                free.add(info);
        }
        writeTunnels(fos, free);
        List outbound = new ArrayList();
        for (Iterator iter = pool.getOutboundTunnels().iterator(); iter.hasNext(); ) {
            TunnelInfo info = pool.getOutboundTunnel((TunnelId)iter.next());
            if (info != null)
                outbound.add(info);
        }
        writeTunnels(fos, outbound);
        List participating = new ArrayList();
        for (Iterator iter = pool.getParticipatingTunnels().iterator(); iter.hasNext(); ) {
            TunnelInfo info = pool.getParticipatingTunnel((TunnelId)iter.next());
            if (info != null)
                participating.add(info);
        }
        writeTunnels(fos, participating);
        List pending = new ArrayList();
        for (Iterator iter = pool.getPendingTunnels().iterator(); iter.hasNext(); ) {
            TunnelInfo info = pool.getPendingTunnel((TunnelId)iter.next());
            if (info != null)
                pending.add(info);
        }
        writeTunnels(fos, pending);
        // resolve client pools first too, so the count matches what we write
        List clients = new ArrayList();
        for (Iterator iter = pool.getClientPools().iterator(); iter.hasNext(); ) {
            ClientTunnelPool cpool = pool.getClientPool((Destination)iter.next());
            if (cpool != null)
                clients.add(cpool);
        }
        DataHelper.writeLong(fos, 2, clients.size());
        for (Iterator iter = clients.iterator(); iter.hasNext(); )
            writeClientPool(fos, (ClientTunnelPool)iter.next());
        fos.flush();
    } catch (IOException ioe) {
        _log.error("Error writing tunnel pool at " + f.getName(), ioe);
    } catch (DataFormatException dfe) {
        _log.error("Error formatting tunnels at " + f.getName(), dfe);
    } finally {
        if (fos != null) try { fos.close(); } catch (IOException ioe) {}
        _log.debug("Tunnel pool state written to " + f.getName());
    }
}
/** write a 2-byte count followed by each TunnelInfo in the list */
private void writeTunnels(FileOutputStream fos, List tunnels) throws IOException, DataFormatException {
    DataHelper.writeLong(fos, 2, tunnels.size());
    for (Iterator iter = tunnels.iterator(); iter.hasNext(); )
        ((TunnelInfo)iter.next()).writeBytes(fos);
}
/**
 * Serialize one client pool: destination, settings properties, then the
 * active and inactive inbound tunnel sets (each a 2-byte count followed
 * by that many serialized tunnels).
 *
 * Bugfix: like writePool, the original wrote the collection sizes up
 * front but then skipped entries whose TunnelInfo had disappeared,
 * letting the persisted count exceed the entries actually written.  Each
 * set is now snapshotted first so count == entries.
 */
private void writeClientPool(FileOutputStream fos, ClientTunnelPool pool) throws IOException, DataFormatException {
    pool.getDestination().writeBytes(fos);
    Properties props = new Properties();
    pool.getClientSettings().writeToProperties(props);
    DataHelper.writeProperties(fos, props);

    java.util.List tunnels = new java.util.ArrayList();
    for (Iterator iter = pool.getInboundTunnelIds().iterator(); iter.hasNext(); ) {
        TunnelInfo info = pool.getInboundTunnel((TunnelId)iter.next());
        if (info != null) tunnels.add(info);
    }
    DataHelper.writeLong(fos, 2, tunnels.size());
    for (int i = 0; i < tunnels.size(); i++)
        ((TunnelInfo)tunnels.get(i)).writeBytes(fos);

    tunnels = new java.util.ArrayList();
    for (Iterator iter = pool.getInactiveInboundTunnelIds().iterator(); iter.hasNext(); ) {
        TunnelInfo info = pool.getInactiveInboundTunnel((TunnelId)iter.next());
        if (info != null) tunnels.add(info);
    }
    DataHelper.writeLong(fos, 2, tunnels.size());
    for (int i = 0; i < tunnels.size(); i++)
        ((TunnelInfo)tunnels.get(i)).writeBytes(fos);
}
/**
 * Load up the tunnels from disk, adding as appropriate to the TunnelPool
 */
public void loadPool(TunnelPool pool) {
    loadPool(pool, getTunnelPoolFile());
}
/**
 * Restore the tunnel pool state previously persisted by writePool, adding
 * each free, outbound, participating, and pending tunnel plus each client
 * pool back into the given pool.  A missing file means nothing to load
 * and is silently ignored; read/format errors are logged and the partial
 * load stands.
 */
public void loadPool(TunnelPool pool, File f) {
    if (!f.exists()) return;
    FileInputStream fin = null;
    try {
        fin = new FileInputStream(f);
        int numFree = (int)DataHelper.readLong(fin, 2);
        for (int i = 0; i < numFree; i++) {
            TunnelInfo info = new TunnelInfo(_context);
            info.readBytes(fin);
            pool.addFreeTunnel(info);
        }
        int numOut = (int)DataHelper.readLong(fin, 2);
        for (int i = 0; i < numOut; i++) {
            TunnelInfo info = new TunnelInfo(_context);
            info.readBytes(fin);
            pool.addOutboundTunnel(info);
        }
        int numParticipating = (int)DataHelper.readLong(fin, 2);
        for (int i = 0; i < numParticipating; i++) {
            TunnelInfo info = new TunnelInfo(_context);
            info.readBytes(fin);
            pool.addParticipatingTunnel(info);
        }
        int numPending = (int)DataHelper.readLong(fin, 2);
        for (int i = 0; i < numPending; i++) {
            TunnelInfo info = new TunnelInfo(_context);
            info.readBytes(fin);
            pool.addPendingTunnel(info);
        }
        int numClients = (int)DataHelper.readLong(fin, 2);
        for (int i = 0; i < numClients; i++) {
            readClientPool(fin, pool);
        }
    } catch (IOException ioe) {
        _log.error("Error reading tunnel pool from " + f.getName(), ioe);
    } catch (DataFormatException dfe) {
        _log.error("Error formatting tunnels from " + f.getName(), dfe);
    } finally {
        if (fin != null) try { fin.close(); } catch (IOException ioe) {}
        // bugfix: previously said "written to" - copy/paste from writePool
        _log.debug("Tunnel pool state read from " + f.getName());
    }
}
/**
 * Deserialize one client pool (destination, settings, active and inactive
 * inbound tunnel sets - the mirror of writeClientPool), register it with
 * the given TunnelPool, and start it.
 */
private void readClientPool(FileInputStream fin, TunnelPool pool) throws IOException, DataFormatException {
    Destination dest = new Destination();
    dest.readBytes(fin);
    Properties props = DataHelper.readProperties(fin);
    ClientTunnelSettings settings = new ClientTunnelSettings();
    settings.readFromProperties(props);

    HashSet active = new HashSet();
    for (int remaining = (int)DataHelper.readLong(fin, 2); remaining > 0; remaining--) {
        TunnelInfo tunnel = new TunnelInfo(_context);
        tunnel.readBytes(fin);
        active.add(tunnel);
    }
    HashSet inactive = new HashSet();
    for (int remaining = (int)DataHelper.readLong(fin, 2); remaining > 0; remaining--) {
        TunnelInfo tunnel = new TunnelInfo(_context);
        tunnel.readBytes(fin);
        inactive.add(tunnel);
    }

    ClientTunnelPool cpool = new ClientTunnelPool(_context, dest, settings, pool);
    cpool.setActiveTunnels(active);
    cpool.setInactiveTunnels(inactive);
    pool.addClientPool(cpool);
    cpool.startPool();
}
/**
 * Resolve the file the pool is persisted in: the router config setting
 * PARAM_TUNNEL_POOL_FILE when set to a nonblank value, otherwise the
 * compiled-in default.
 */
private File getTunnelPoolFile() {
    String name = _context.router().getConfigSetting(PARAM_TUNNEL_POOL_FILE);
    if ( (name == null) || (name.trim().length() <= 0) )
        name = DEFAULT_TUNNEL_POOL_FILE;
    return new File(name);
}
}

View File

@ -1,124 +0,0 @@
package net.i2p.router.tunnelmanager;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import net.i2p.data.TunnelId;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
* Manage the testing for free, outbound, and active inbound client tunnels
*
*/
class TunnelTestManager {
    private final Log _log;
    private final RouterContext _context;
    private final TunnelPool _pool;
    /**
     * Set by stopTesting() - possibly from a different thread than the job
     * runner that polls it.  Bugfix: made volatile so the shutdown request is
     * guaranteed to become visible to the testing job (the original plain
     * field had no such visibility guarantee).
     */
    private volatile boolean _stopTesting;

    /** dont test any particular tunnel more than once a minute */
    private final static long MINIMUM_RETEST_DELAY = 60*1000;

    public TunnelTestManager(RouterContext ctx, TunnelPool pool) {
        _context = ctx;
        _log = ctx.logManager().getLog(TunnelTestManager.class);
        ctx.statManager().createRateStat("tunnel.testSuccessTime", "How long do successful tunnel tests take?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        _pool = pool;
        _stopTesting = false;
        // kick off the recurring coordination job immediately
        _context.jobQueue().addJob(new CoordinateTunnelTestingJob());
    }

    /**
     * Decide which tunnels are worth testing this pass.  Skips local tunnels
     * (no expiration), tunnels that are not ready, tunnels expiring before
     * the next retest window, and freshly created tunnels; of the remaining
     * candidates not tested within MINIMUM_RETEST_DELAY, each is selected
     * with 50% probability to spread testing load across passes.
     */
    private Set selectTunnelsToTest() {
        Set allIds = getAllIds();
        Set toTest = new HashSet(allIds.size());
        long now = _context.clock().now();
        for (Iterator iter = allIds.iterator(); iter.hasNext();) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getTunnelInfo(id);
            if ( (info != null) && (info.getSettings() != null) ) {
                if (info.getSettings().getExpiration() <= 0) {
                    // skip local tunnels
                } else if (!info.getIsReady()) {
                    // skip not ready tunnels
                } else if (info.getSettings().getExpiration() < now + MINIMUM_RETEST_DELAY) {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("Tunnel " + id.getTunnelId()
                                   + " will be expiring within the current period ("
                                   + new Date(info.getSettings().getExpiration())
                                   + "), so skip testing it");
                } else if (info.getSettings().getCreated() + MINIMUM_RETEST_DELAY < now) {
                    // we're past the initial buffer period
                    if (info.getLastTested() + MINIMUM_RETEST_DELAY < now) {
                        // we haven't tested this tunnel in the minimum delay, so maybe we
                        // should.
                        if (_context.random().nextBoolean()) {
                            toTest.add(id);
                        } else {
                            if (_log.shouldLog(Log.DEBUG))
                                _log.debug("We could have tested tunnel " + id.getTunnelId()
                                           + ", but randomly decided not to.");
                        }
                    }
                } else {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("Tunnel " + id.getTunnelId() + " was just created ("
                                   + new Date(info.getSettings().getCreated())
                                   + "), wait until the next pass to test it");
                }
            } else {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Hmm, a normally testable tunnel [" + id.getTunnelId() + "] didn't have info or settings: " + info);
            }
        }
        return toTest;
    }

    /** All tunnel ids the pool is currently managing. */
    private Set getAllIds() {
        return _pool.getManagedTunnelIds();
    }

    /** Request that no further test passes be scheduled. */
    public void stopTesting() { _stopTesting = true; }

    /** Fire off an asynchronous test of the given tunnel. */
    private void runTest(TunnelId tunnel) {
        _context.jobQueue().addJob(new TestTunnelJob(_context, tunnel, _pool));
    }

    /**
     * Recurring job that selects and launches tunnel tests every 30 seconds
     * until stopTesting() is called.
     */
    private class CoordinateTunnelTestingJob extends JobImpl {
        public CoordinateTunnelTestingJob() {
            super(TunnelTestManager.this._context);
            getTiming().setStartAfter(TunnelTestManager.this._context.clock().now() + MINIMUM_RETEST_DELAY);
        }
        public String getName() { return "Coordinate Tunnel Testing"; }
        public void runJob() {
            // bail without rescheduling once shutdown has been requested
            if (_stopTesting) return;

            Set toTestIds = selectTunnelsToTest();
            if (_log.shouldLog(Log.INFO))
                _log.info("Running tests on selected tunnels: " + toTestIds);
            for (Iterator iter = toTestIds.iterator(); iter.hasNext(); ) {
                TunnelId id = (TunnelId)iter.next();
                runTest(id);
            }
            reschedule();
        }
        /** Queue this job to run again 30 seconds from now. */
        private void reschedule() {
            long nxt = TunnelTestManager.this._context.clock().now() + 30*1000;
            getTiming().setStartAfter(nxt);
            TunnelTestManager.this._context.jobQueue().addJob(CoordinateTunnelTestingJob.this);
        }
    }
}