2005-02-16 jrandom

* (Merged the 0.5-pre branch back into CVS HEAD)
    * Replaced the old tunnel routing crypto with the one specified in
      router/doc/tunnel-alt.html, including updates to the web console to view
      and tweak it.
    * Provide the means for routers to reject tunnel requests with a wider
      range of responses:
        probabilistic rejection, due to approaching overload
        transient rejection, due to temporary overload
        bandwidth rejection, due to persistent bandwidth overload
        critical rejection, due to general router fault (or imminent shutdown)
      The different responses are factored into the profiles accordingly.
    * Replaced the old I2CP tunnel related options (tunnels.depthInbound, etc)
      with a series of new properties, relevant to the new tunnel routing code:
        inbound.nickname (used on the console)
        inbound.quantity (# of tunnels to use in any leaseSets)
        inbound.backupQuantity (# of tunnels to keep in the ready)
        inbound.length (# of remote peers in the tunnel)
        inbound.lengthVariance (if > 0, permute the length by adding a random #
                                up to the variance.  if < 0, permute the length
                                by adding or subtracting a random # up to the
                                variance)
        outbound.* (same as the inbound, except for the, uh, outbound tunnels
                    in that client's pool)
      There are other options, and more will be added later, but the above are
      the most relevant ones.
    * Replaced Jetty 4.2.21 with Jetty 5.1.2
    * Compress all profile data on disk.
    * Adjust the reseeding functionality to work even when the JVM's http proxy
      is set.
    * Enable a poor-man's interactive-flow in the streaming lib by choking the
      max window size.
    * Reduced the default streaming lib max message size to 16KB (though still
      configurable by the user), also doubling the default maximum window
      size.
    * Replaced the RouterIdentity in a Lease with its SHA256 hash.
    * Reduced the overall I2NP message checksum from a full 32 byte SHA256 to
      the first byte of the SHA256.
    * Added a new "netId" flag to let routers drop references to other routers
      who we won't be able to talk to.
    * Extended the timestamper to get a second (or third) opinion whenever it
      wants to actually adjust the clock offset.
    * Replaced that kludge of a timestamp I2NP message with a full blown
      DateMessage.
    * Substantial memory optimizations within the router and the SDK to reduce
      GC churn.  Client apps and the streaming libs have not been tuned,
      however.
    * More bugfixes than you can shake a stick at.

2005-02-13  jrandom
    * Updated jbigi source to handle 64bit CPUs.  The bundled jbigi.jar still
      only contains 32bit versions, so build your own, placing libjbigi.so in
      your install dir if necessary.  (thanks mule!)
    * Added support for libjbigi-$os-athlon64 to NativeBigInteger and CPUID
      (thanks spaetz!)
This commit is contained in:
jrandom
2005-02-16 22:23:47 +00:00
committed by zzz
parent 36f7e98e90
commit 566a713baa
142 changed files with 3202 additions and 2535 deletions

View File

@ -28,19 +28,19 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
private final static Log _log = new Log(DeliveryStatusMessage.class);
public final static int MESSAGE_TYPE = 10;
private long _id;
private Date _arrival;
private long _arrival;
public DeliveryStatusMessage(I2PAppContext context) {
super(context);
setMessageId(-1);
setArrival(null);
setArrival(-1);
}
public long getMessageId() { return _id; }
public void setMessageId(long id) { _id = id; }
public Date getArrival() { return _arrival; }
public void setArrival(Date arrival) { _arrival = arrival; }
public long getArrival() { return _arrival; }
public void setArrival(long arrival) { _arrival = arrival; }
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
@ -48,11 +48,7 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
_id = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
try {
_arrival = DataHelper.fromDate(data, curIndex);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to read the arrival");
}
_arrival = DataHelper.fromLong(data, curIndex, DataHelper.DATE_LENGTH);
}
/** calculate the message body's length (not including the header and footer */
@ -61,13 +57,12 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if ( (_id < 0) || (_arrival == null) ) throw new I2NPMessageException("Not enough data to write out");
if ( (_id < 0) || (_arrival <= 0) ) throw new I2NPMessageException("Not enough data to write out");
byte id[] = DataHelper.toLong(4, _id);
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
byte date[] = DataHelper.toDate(_arrival);
System.arraycopy(date, 0, out, curIndex, DataHelper.DATE_LENGTH);
DataHelper.toLong(out, curIndex, DataHelper.DATE_LENGTH, _arrival);
curIndex += DataHelper.DATE_LENGTH;
return curIndex;
}
@ -75,8 +70,7 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return (int)getMessageId() +
DataHelper.hashCode(getArrival());
return (int)getMessageId() + (int)getArrival();
}
public boolean equals(Object object) {
@ -93,7 +87,7 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
StringBuffer buf = new StringBuffer();
buf.append("[DeliveryStatusMessage: ");
buf.append("\n\tMessage ID: ").append(getMessageId());
buf.append("\n\tArrival: ").append(_context.clock().now() - _arrival.getTime());
buf.append("\n\tArrival: ").append(_context.clock().now() - _arrival);
buf.append("ms in the past");
buf.append("]");
return buf.toString();

View File

@ -126,7 +126,16 @@ public class GarlicClove extends DataStructureImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Wrote instructions: " + _instructions);
out.write(_msg.toByteArray());
try {
byte m[] = _msg.toByteArray();
if (m == null)
throw new RuntimeException("foo, returned null");
if (m.length <= 0)
throw new RuntimeException("foo, returned 0 length");
out.write(m);
} catch (Exception e) {
throw new DataFormatException("Unable to write the clove: " + _msg + " to " + out, e);
}
DataHelper.writeLong(out, 4, _cloveId);
DataHelper.writeDate(out, _expiration);
if (_log.shouldLog(Log.DEBUG))
@ -137,6 +146,14 @@ public class GarlicClove extends DataStructureImpl {
_log.debug("Written cert: " + _certificate);
}
public int estimateSize() {
return 64 // instructions (high estimate)
+ _msg.getMessageSize()
+ 4 // cloveId
+ DataHelper.DATE_LENGTH
+ 4; // certificate
}
public boolean equals(Object obj) {
if ( (obj == null) || !(obj instanceof GarlicClove))
return false;

View File

@ -10,7 +10,6 @@ package net.i2p.data.i2np;
import java.io.IOException;
import java.io.InputStream;
import java.util.Date;
import net.i2p.data.DataStructure;
@ -67,7 +66,7 @@ public interface I2NPMessage extends DataStructure {
* Date after which the message should be dropped (and the associated uniqueId forgotten)
*
*/
public Date getMessageExpiration();
public long getMessageExpiration();
/** How large the message is, including any checksums */
public int getMessageSize();

View File

@ -132,10 +132,14 @@ public class I2NPMessageHandler {
return new DatabaseSearchReplyMessage(_context);
case DeliveryStatusMessage.MESSAGE_TYPE:
return new DeliveryStatusMessage(_context);
case DateMessage.MESSAGE_TYPE:
return new DateMessage(_context);
case GarlicMessage.MESSAGE_TYPE:
return new GarlicMessage(_context);
case TunnelMessage.MESSAGE_TYPE:
return new TunnelMessage(_context);
case TunnelDataMessage.MESSAGE_TYPE:
return new TunnelDataMessage(_context);
case TunnelGatewayMessage.MESSAGE_TYPE:
return new TunnelGatewayMessage(_context);
case DataMessage.MESSAGE_TYPE:
return new DataMessage(_context);
case TunnelCreateMessage.MESSAGE_TYPE:

View File

@ -12,7 +12,6 @@ import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Date;
import net.i2p.I2PAppContext;
import net.i2p.crypto.SHA256EntryCache;
@ -30,19 +29,20 @@ import net.i2p.util.Log;
public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPMessage {
private Log _log;
protected I2PAppContext _context;
private Date _expiration;
private long _expiration;
private long _uniqueId;
private byte _data[];
public final static long DEFAULT_EXPIRATION_MS = 1*60*1000; // 1 minute by default
public final static int CHECKSUM_LENGTH = 1; //Hash.HASH_LENGTH;
public I2NPMessageImpl(I2PAppContext context) {
_context = context;
_log = context.logManager().getLog(I2NPMessageImpl.class);
_expiration = new Date(_context.clock().now() + DEFAULT_EXPIRATION_MS);
_expiration = _context.clock().now() + DEFAULT_EXPIRATION_MS;
_uniqueId = _context.random().nextLong(MAX_ID_VALUE);
_context.statManager().createRateStat("i2np.writeTime", "How long it takes to write an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("i2np.readTime", "How long it takes to read an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
//_context.statManager().createRateStat("i2np.writeTime", "How long it takes to write an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
//_context.statManager().createRateStat("i2np.readTime", "How long it takes to read an I2NP message", "I2NP", new long[] { 10*60*1000, 60*60*1000 });
}
public void readBytes(InputStream in) throws DataFormatException, IOException {
@ -57,10 +57,14 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
if (type < 0)
type = (int)DataHelper.readLong(in, 1);
_uniqueId = DataHelper.readLong(in, 4);
_expiration = DataHelper.readDate(in);
_expiration = DataHelper.readLong(in, DataHelper.DATE_LENGTH);
int size = (int)DataHelper.readLong(in, 2);
Hash h = new Hash();
h.readBytes(in);
byte checksum[] = new byte[CHECKSUM_LENGTH];
int read = DataHelper.read(in, checksum);
if (read != CHECKSUM_LENGTH)
throw new I2NPMessageException("checksum is too small [" + read + "]");
//Hash h = new Hash();
//h.readBytes(in);
if (buffer.length < size) {
if (size > 64*1024) throw new I2NPMessageException("size=" + size);
buffer = new byte[size];
@ -77,18 +81,19 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
SHA256EntryCache.CacheEntry cache = _context.sha().cache().acquire(size);
Hash calc = _context.sha().calculateHash(buffer, 0, size, cache);
boolean eq = calc.equals(h);
//boolean eq = calc.equals(h);
boolean eq = DataHelper.eq(checksum, 0, calc.getData(), 0, CHECKSUM_LENGTH);
_context.sha().cache().release(cache);
if (!eq)
throw new I2NPMessageException("Hash does not match");
throw new I2NPMessageException("Hash does not match for " + getClass().getName());
long start = _context.clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
readMessage(buffer, 0, size, type);
long time = _context.clock().now() - start;
if (time > 50)
_context.statManager().addRateData("i2np.readTime", time, time);
//if (time > 50)
// _context.statManager().addRateData("i2np.readTime", time, time);
return size + Hash.HASH_LENGTH + 1 + 4 + DataHelper.DATE_LENGTH;
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the message header", dfe);
@ -102,19 +107,15 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
}
_uniqueId = DataHelper.fromLong(data, cur, 4);
cur += 4;
try {
_expiration = DataHelper.fromDate(data, cur);
cur += DataHelper.DATE_LENGTH;
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to read the expiration", dfe);
}
_expiration = DataHelper.fromLong(data, cur, DataHelper.DATE_LENGTH);
cur += DataHelper.DATE_LENGTH;
int size = (int)DataHelper.fromLong(data, cur, 2);
cur += 2;
Hash h = new Hash();
byte hdata[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, cur, hdata, 0, Hash.HASH_LENGTH);
cur += Hash.HASH_LENGTH;
h.setData(hdata);
//Hash h = new Hash();
byte hdata[] = new byte[CHECKSUM_LENGTH];
System.arraycopy(data, cur, hdata, 0, CHECKSUM_LENGTH);
cur += CHECKSUM_LENGTH;
//h.setData(hdata);
if (cur + size > data.length)
throw new I2NPMessageException("Payload is too short ["
@ -125,10 +126,11 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
SHA256EntryCache.CacheEntry cache = _context.sha().cache().acquire(size);
Hash calc = _context.sha().calculateHash(data, cur, size, cache);
boolean eq = calc.equals(h);
//boolean eq = calc.equals(h);
boolean eq = DataHelper.eq(hdata, 0, calc.getData(), 0, CHECKSUM_LENGTH);
_context.sha().cache().release(cache);
if (!eq)
throw new I2NPMessageException("Hash does not match");
throw new I2NPMessageException("Hash does not match for " + getClass().getName());
long start = _context.clock().now();
if (_log.shouldLog(Log.DEBUG))
@ -136,14 +138,14 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
readMessage(data, cur, size, type);
cur += size;
long time = _context.clock().now() - start;
if (time > 50)
_context.statManager().addRateData("i2np.readTime", time, time);
//if (time > 50)
// _context.statManager().addRateData("i2np.readTime", time, time);
return cur - offset;
}
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
int size = getMessageSize();
if (size < 47) throw new DataFormatException("Unable to build the message");
if (size < 15 + CHECKSUM_LENGTH) throw new DataFormatException("Unable to build the message");
byte buf[] = new byte[size];
int read = toByteArray(buf);
if (read < 0)
@ -160,18 +162,19 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
* Date after which the message should be dropped (and the associated uniqueId forgotten)
*
*/
public Date getMessageExpiration() { return _expiration; }
public void setMessageExpiration(Date exp) { _expiration = exp; }
public long getMessageExpiration() { return _expiration; }
public void setMessageExpiration(long exp) { _expiration = exp; }
public synchronized int getMessageSize() {
return calculateWrittenLength()+47; // 47 bytes in the header
return calculateWrittenLength()+15 + CHECKSUM_LENGTH; // 47 bytes in the header
}
public byte[] toByteArray() {
byte data[] = new byte[getMessageSize()];
int written = toByteArray(data);
if (written != data.length) {
_log.error("Error writing out " + data.length + " for " + getClass().getName());
_log.log(Log.CRIT, "Error writing out " + data.length + " (written: " + written + ", msgSize: " + getMessageSize() +
", writtenLen: " + calculateWrittenLength() + ") for " + getClass().getName());
return null;
}
return data;
@ -180,34 +183,44 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
public int toByteArray(byte buffer[]) {
long start = _context.clock().now();
byte prefix[][] = new byte[][] { DataHelper.toLong(1, getType()),
DataHelper.toLong(4, _uniqueId),
DataHelper.toDate(_expiration),
new byte[2],
new byte[Hash.HASH_LENGTH]};
byte suffix[][] = new byte[][] { };
int prefixLen = 1 // type
+ 4 // uniqueId
+ DataHelper.DATE_LENGTH // expiration
+ 2 // payload length
+ CHECKSUM_LENGTH; // walnuts
//byte prefix[][] = new byte[][] { DataHelper.toLong(1, getType()),
// DataHelper.toLong(4, _uniqueId),
// DataHelper.toLong(DataHelper.DATE_LENGTH, _expiration),
// new byte[2],
// new byte[CHECKSUM_LENGTH]};
//byte suffix[][] = new byte[][] { };
try {
int writtenLen = toByteArray(buffer, prefix, suffix);
int writtenLen = writeMessageBody(buffer, prefixLen);
int payloadLen = writtenLen - prefixLen;
SHA256EntryCache.CacheEntry cache = _context.sha().cache().acquire(payloadLen);
Hash h = _context.sha().calculateHash(buffer, prefixLen, payloadLen, cache);
int prefixLen = 1+4+8+2+Hash.HASH_LENGTH;
int suffixLen = 0;
int payloadLen = writtenLen - prefixLen - suffixLen;
Hash h = _context.sha().calculateHash(buffer, prefixLen, payloadLen);
byte len[] = DataHelper.toLong(2, payloadLen);
buffer[1+4+8] = len[0];
buffer[1+4+8+1] = len[1];
for (int i = 0; i < Hash.HASH_LENGTH; i++)
System.arraycopy(h.getData(), 0, buffer, 1+4+8+2, Hash.HASH_LENGTH);
int off = 0;
DataHelper.toLong(buffer, off, 1, getType());
off += 1;
DataHelper.toLong(buffer, off, 4, _uniqueId);
off += 4;
DataHelper.toLong(buffer, off, DataHelper.DATE_LENGTH, _expiration);
off += DataHelper.DATE_LENGTH;
DataHelper.toLong(buffer, off, 2, payloadLen);
off += 2;
System.arraycopy(h.getData(), 0, buffer, off, CHECKSUM_LENGTH);
_context.sha().cache().release(cache);
long time = _context.clock().now() - start;
if (time > 50)
_context.statManager().addRateData("i2np.writeTime", time, time);
//if (time > 50)
// _context.statManager().addRateData("i2np.writeTime", time, time);
return writtenLen;
} catch (I2NPMessageException ime) {
_context.logManager().getLog(getClass()).error("Error writing", ime);
throw new IllegalStateException("Unable to serialize the message: " + ime.getMessage());
_context.logManager().getLog(getClass()).log(Log.CRIT, "Error writing", ime);
throw new IllegalStateException("Unable to serialize the message (" + getClass().getName()
+ "): " + ime.getMessage());
}
}
@ -218,7 +231,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
* @return the index into the array after the last byte written
*/
protected abstract int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException;
/*
protected int toByteArray(byte out[], byte[][] prefix, byte[][] suffix) throws I2NPMessageException {
int curIndex = 0;
for (int i = 0; i < prefix.length; i++) {
@ -235,4 +248,5 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
return curIndex;
}
*/
}

View File

@ -11,6 +11,7 @@ package net.i2p.data.i2np;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import net.i2p.I2PAppContext;
import net.i2p.data.Certificate;
@ -19,8 +20,6 @@ import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
import net.i2p.data.SigningPrivateKey;
import net.i2p.data.SigningPublicKey;
import net.i2p.data.TunnelId;
import net.i2p.util.Log;
@ -28,95 +27,47 @@ import net.i2p.util.Log;
* Defines the message sent to a router to request that it participate in a
* tunnel using the included configuration settings.
*
* @author jrandom
*/
public class TunnelCreateMessage extends I2NPMessageImpl {
private final static Log _log = new Log(TunnelCreateMessage.class);
private Log _log;
public final static int MESSAGE_TYPE = 6;
private int _participantType;
private TunnelId _tunnelId;
private Hash _nextRouter;
private TunnelId _nextTunnelId;
private long _tunnelDuration;
private TunnelConfigurationSessionKey _configKey;
private long _maxPeakMessagesPerMin;
private long _maxAvgMessagesPerMin;
private long _maxPeakBytesPerMin;
private long _maxAvgBytesPerMin;
private boolean _includeDummyTraffic;
private boolean _reorderMessages;
private TunnelSigningPublicKey _verificationPubKey;
private TunnelSigningPrivateKey _verificationPrivKey;
private TunnelSessionKey _tunnelKey;
private Certificate _certificate;
private int _durationSeconds;
private SessionKey _layerKey;
private SessionKey _ivKey;
private Properties _options;
private Hash _replyGateway;
private TunnelId _replyTunnel;
private SessionTag _replyTag;
private SessionKey _replyKey;
private TunnelId _replyTunnel;
private Hash _replyPeer;
private boolean _isGateway;
private long _nonce;
private Certificate _certificate;
private byte[] _optionsCache;
private byte[] _certificateCache;
public static final int PARTICIPANT_TYPE_GATEWAY = 1;
public static final int PARTICIPANT_TYPE_ENDPOINT = 2;
public static final int PARTICIPANT_TYPE_OTHER = 3;
private final static long FLAG_DUMMY = 1 << 7;
private final static long FLAG_REORDER = 1 << 6;
public static final long MAX_NONCE_VALUE = ((1l << 32l) - 1l);
private static final Hash INVALID_HASH = new Hash(new byte[Hash.HASH_LENGTH]); // all 0s
private static final TunnelId INVALID_TUNNEL = TunnelId.INVALID;
public TunnelCreateMessage(I2PAppContext context) {
super(context);
setParticipantType(-1);
setNextRouter(null);
setNextTunnelId(null);
setTunnelId(null);
setTunnelDurationSeconds(-1);
setConfigurationKey(null);
setMaxPeakMessagesPerMin(-1);
setMaxAvgMessagesPerMin(-1);
setMaxPeakBytesPerMin(-1);
setMaxAvgBytesPerMin(-1);
setIncludeDummyTraffic(false);
setReorderMessages(false);
setVerificationPublicKey(null);
setVerificationPrivateKey(null);
setTunnelKey(null);
setCertificate(null);
setReplyTag(null);
setReplyKey(null);
setReplyTunnel(null);
setReplyPeer(null);
_log = context.logManager().getLog(TunnelCreateMessage.class);
}
public void setParticipantType(int type) { _participantType = type; }
public int getParticipantType() { return _participantType; }
public void setNextRouter(Hash routerIdentityHash) { _nextRouter = routerIdentityHash; }
public Hash getNextRouter() { return _nextRouter; }
public void setNextTunnelId(TunnelId id) { _nextTunnelId = id; }
public TunnelId getNextTunnelId() { return _nextTunnelId; }
public void setTunnelId(TunnelId id) { _tunnelId = id; }
public TunnelId getTunnelId() { return _tunnelId; }
public void setTunnelDurationSeconds(long durationSeconds) { _tunnelDuration = durationSeconds; }
public long getTunnelDurationSeconds() { return _tunnelDuration; }
public void setConfigurationKey(TunnelConfigurationSessionKey key) { _configKey = key; }
public TunnelConfigurationSessionKey getConfigurationKey() { return _configKey; }
public void setMaxPeakMessagesPerMin(long msgs) { _maxPeakMessagesPerMin = msgs; }
public long getMaxPeakMessagesPerMin() { return _maxPeakMessagesPerMin; }
public void setMaxAvgMessagesPerMin(long msgs) { _maxAvgMessagesPerMin = msgs; }
public long getMaxAvgMessagesPerMin() { return _maxAvgMessagesPerMin; }
public void setMaxPeakBytesPerMin(long bytes) { _maxPeakBytesPerMin = bytes; }
public long getMaxPeakBytesPerMin() { return _maxPeakBytesPerMin; }
public void setMaxAvgBytesPerMin(long bytes) { _maxAvgBytesPerMin = bytes; }
public long getMaxAvgBytesPerMin() { return _maxAvgBytesPerMin; }
public void setIncludeDummyTraffic(boolean include) { _includeDummyTraffic = include; }
public boolean getIncludeDummyTraffic() { return _includeDummyTraffic; }
public void setReorderMessages(boolean reorder) { _reorderMessages = reorder; }
public boolean getReorderMessages() { return _reorderMessages; }
public void setVerificationPublicKey(TunnelSigningPublicKey key) { _verificationPubKey = key; }
public TunnelSigningPublicKey getVerificationPublicKey() { return _verificationPubKey; }
public void setVerificationPrivateKey(TunnelSigningPrivateKey key) { _verificationPrivKey = key; }
public TunnelSigningPrivateKey getVerificationPrivateKey() { return _verificationPrivKey; }
public void setTunnelKey(TunnelSessionKey key) { _tunnelKey = key; }
public TunnelSessionKey getTunnelKey() { return _tunnelKey; }
public void setDurationSeconds(int seconds) { _durationSeconds = seconds; }
public int getDurationSeconds() { return _durationSeconds; }
public void setLayerKey(SessionKey key) { _layerKey = key; }
public SessionKey getLayerKey() { return _layerKey; }
public void setIVKey(SessionKey key) { _ivKey = key; }
public SessionKey getIVKey() { return _ivKey; }
public void setCertificate(Certificate cert) { _certificate = cert; }
public Certificate getCertificate() { return _certificate; }
public void setReplyTag(SessionTag tag) { _replyTag = tag; }
@ -125,257 +76,185 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
public SessionKey getReplyKey() { return _replyKey; }
public void setReplyTunnel(TunnelId id) { _replyTunnel = id; }
public TunnelId getReplyTunnel() { return _replyTunnel; }
public void setReplyPeer(Hash peer) { _replyPeer = peer; }
public Hash getReplyPeer() { return _replyPeer; }
public void setReplyGateway(Hash peer) { _replyGateway = peer; }
public Hash getReplyGateway() { return _replyGateway; }
public void setNonce(long nonce) { _nonce = nonce; }
public long getNonce() { return _nonce; }
public void setIsGateway(boolean isGateway) { _isGateway = isGateway; }
public boolean getIsGateway() { return _isGateway; }
public Properties getOptions() { return _options; }
public void setOptions(Properties opts) { _options = opts; }
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
int curIndex = offset;
_participantType = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++;
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
byte peer[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_nextRouter = new Hash(peer);
_nextTunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
curIndex += 4;
}
_tunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
curIndex += 4;
if (_tunnelId.getTunnelId() <= 0)
throw new I2NPMessageException("wtf, tunnelId == " + _tunnelId);
_tunnelDuration = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
byte key[] = new byte[SessionKey.KEYSIZE_BYTES];
System.arraycopy(data, curIndex, key, 0, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
_configKey = new TunnelConfigurationSessionKey(new SessionKey(key));
_maxPeakMessagesPerMin = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
_maxAvgMessagesPerMin = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
_maxPeakBytesPerMin = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
_maxAvgBytesPerMin = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
int flags = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++;
_includeDummyTraffic = flagsIncludeDummy(flags);
_reorderMessages = flagsReorder(flags);
key = new byte[SigningPublicKey.KEYSIZE_BYTES];
System.arraycopy(data, curIndex, key, 0, SigningPublicKey.KEYSIZE_BYTES);
curIndex += SigningPublicKey.KEYSIZE_BYTES;
_verificationPubKey = new TunnelSigningPublicKey(new SigningPublicKey(key));
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
key = new byte[SigningPrivateKey.KEYSIZE_BYTES];
System.arraycopy(data, curIndex, key, 0, SigningPrivateKey.KEYSIZE_BYTES);
curIndex += SigningPrivateKey.KEYSIZE_BYTES;
_verificationPrivKey = new TunnelSigningPrivateKey(new SigningPrivateKey(key));
}
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
key = new byte[SessionKey.KEYSIZE_BYTES];
System.arraycopy(data, curIndex, key, 0, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
_tunnelKey = new TunnelSessionKey(new SessionKey(key));
}
int certType = (int) DataHelper.fromLong(data, curIndex, 1);
curIndex++;
int certLength = (int) DataHelper.fromLong(data, curIndex, 2);
curIndex += 2;
if (certLength <= 0) {
_certificate = new Certificate(certType, null);
if (DataHelper.eq(INVALID_HASH.getData(), 0, data, offset, Hash.HASH_LENGTH)) {
_nextRouter = null;
} else {
if (certLength > 16*1024) throw new I2NPMessageException("cert size " + certLength);
byte certPayload[] = new byte[certLength];
System.arraycopy(data, curIndex, certPayload, 0, certLength);
curIndex += certLength;
_certificate = new Certificate(certType, certPayload);
_nextRouter = new Hash(new byte[Hash.HASH_LENGTH]);
System.arraycopy(data, offset, _nextRouter.getData(), 0, Hash.HASH_LENGTH);
}
offset += Hash.HASH_LENGTH;
long id = DataHelper.fromLong(data, offset, 4);
if (id > 0)
_nextTunnelId = new TunnelId(id);
offset += 4;
_durationSeconds = (int)DataHelper.fromLong(data, offset, 2);
offset += 2;
_layerKey = new SessionKey(new byte[SessionKey.KEYSIZE_BYTES]);
System.arraycopy(data, offset, _layerKey.getData(), 0, SessionKey.KEYSIZE_BYTES);
offset += SessionKey.KEYSIZE_BYTES;
_ivKey = new SessionKey(new byte[SessionKey.KEYSIZE_BYTES]);
System.arraycopy(data, offset, _ivKey.getData(), 0, SessionKey.KEYSIZE_BYTES);
offset += SessionKey.KEYSIZE_BYTES;
try {
Properties opts = new Properties();
_options = opts;
offset = DataHelper.fromProperties(data, offset, opts);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the options", dfe);
}
byte tag[] = new byte[SessionTag.BYTE_LENGTH];
System.arraycopy(data, curIndex, tag, 0, SessionTag.BYTE_LENGTH);
curIndex += SessionTag.BYTE_LENGTH;
_replyTag = new SessionTag(tag);
_replyGateway = new Hash(new byte[Hash.HASH_LENGTH]);
System.arraycopy(data, offset, _replyGateway.getData(), 0, Hash.HASH_LENGTH);
offset += Hash.HASH_LENGTH;
key = new byte[SessionKey.KEYSIZE_BYTES];
System.arraycopy(data, curIndex, key, 0, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
_replyKey = new SessionKey(key);
_replyTunnel = new TunnelId(DataHelper.fromLong(data, offset, 4));
offset += 4;
_replyTunnel = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
curIndex += 4;
_replyTag = new SessionTag(new byte[SessionTag.BYTE_LENGTH]);
System.arraycopy(data, offset, _replyTag.getData(), 0, SessionTag.BYTE_LENGTH);
offset += SessionTag.BYTE_LENGTH;
byte peer[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_replyPeer = new Hash(peer);
_replyKey = new SessionKey(new byte[SessionKey.KEYSIZE_BYTES]);
System.arraycopy(data, offset, _replyKey.getData(), 0, SessionKey.KEYSIZE_BYTES);
offset += SessionKey.KEYSIZE_BYTES;
_nonce = DataHelper.fromLong(data, offset, 4);
offset += 4;
try {
Certificate cert = new Certificate();
_certificate = cert;
offset += cert.readBytes(data, offset);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the certificate", dfe);
}
Boolean b = DataHelper.fromBoolean(data, offset);
if (b == null)
throw new I2NPMessageException("isGateway == unknown?!");
_isGateway = b.booleanValue();
offset += DataHelper.BOOLEAN_LENGTH;
}
/** calculate the message body's length (not including the header and footer */
protected int calculateWrittenLength() {
int length = 0;
length += 1; // participantType
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
length += Hash.HASH_LENGTH;
length += 4; // nextTunnelId
}
length += 4; // tunnelId
length += 4; // duration;
length += SessionKey.KEYSIZE_BYTES;
length += 4*4; // max limits
length += 1; // flags
length += SigningPublicKey.KEYSIZE_BYTES;
if (_participantType == PARTICIPANT_TYPE_GATEWAY)
length += SigningPrivateKey.KEYSIZE_BYTES;
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT)
|| (_participantType == PARTICIPANT_TYPE_GATEWAY) )
length += SessionKey.KEYSIZE_BYTES;
_certificateCache = _certificate.toByteArray();
length += _certificateCache.length;
length += SessionTag.BYTE_LENGTH;
length += SessionKey.KEYSIZE_BYTES;
length += Hash.HASH_LENGTH; // nextRouter
length += 4; // nextTunnel
length += 2; // duration
length += SessionKey.KEYSIZE_BYTES; // layerKey
length += SessionKey.KEYSIZE_BYTES; // ivKey
if (_optionsCache == null)
_optionsCache = DataHelper.toProperties(_options);
length += _optionsCache.length;
length += Hash.HASH_LENGTH; // replyGateway
length += 4; // replyTunnel
length += Hash.HASH_LENGTH; // replyPeer
length += SessionTag.BYTE_LENGTH; // replyTag
length += SessionKey.KEYSIZE_BYTES; // replyKey
length += 4; // nonce
if (_certificateCache == null)
_certificateCache = _certificate.toByteArray();
length += _certificateCache.length;
length += DataHelper.BOOLEAN_LENGTH;
return length;
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
byte type[] = DataHelper.toLong(1, _participantType);
out[curIndex++] = type[0];
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
System.arraycopy(_nextRouter.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
byte id[] = DataHelper.toLong(4, _nextTunnelId.getTunnelId());
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
}
byte id[] = DataHelper.toLong(4, _tunnelId.getTunnelId());
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
byte duration[] = DataHelper.toLong(4, _tunnelDuration);
System.arraycopy(duration, 0, out, curIndex, 4);
curIndex += 4;
System.arraycopy(_configKey.getKey().getData(), 0, out, curIndex, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
protected int writeMessageBody(byte data[], int offset) throws I2NPMessageException {
if (_nextRouter == null)
System.arraycopy(INVALID_HASH.getData(), 0, data, offset, Hash.HASH_LENGTH);
else
System.arraycopy(_nextRouter.getData(), 0, data, offset, Hash.HASH_LENGTH);
offset += Hash.HASH_LENGTH;
byte val[] = DataHelper.toLong(4, _maxPeakMessagesPerMin);
System.arraycopy(val, 0, out, curIndex, 4);
curIndex += 4;
val = DataHelper.toLong(4, _maxAvgMessagesPerMin);
System.arraycopy(val, 0, out, curIndex, 4);
curIndex += 4;
val = DataHelper.toLong(4, _maxPeakBytesPerMin);
System.arraycopy(val, 0, out, curIndex, 4);
curIndex += 4;
val = DataHelper.toLong(4, _maxAvgBytesPerMin);
System.arraycopy(val, 0, out, curIndex, 4);
curIndex += 4;
long flags = getFlags();
byte flag[] = DataHelper.toLong(1, flags);
out[curIndex++] = flag[0];
System.arraycopy(_verificationPubKey.getKey().getData(), 0, out, curIndex, SigningPublicKey.KEYSIZE_BYTES);
curIndex += SigningPublicKey.KEYSIZE_BYTES;
if (_nextTunnelId == null)
DataHelper.toLong(data, offset, 4, 0);
else
DataHelper.toLong(data, offset, 4, _nextTunnelId.getTunnelId());
offset += 4;
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
System.arraycopy(_verificationPrivKey.getKey().getData(), 0, out, curIndex, SigningPrivateKey.KEYSIZE_BYTES);
curIndex += SigningPrivateKey.KEYSIZE_BYTES;
}
DataHelper.toLong(data, offset, 2, _durationSeconds);
offset += 2;
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
System.arraycopy(_tunnelKey.getKey().getData(), 0, out, curIndex, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
}
System.arraycopy(_certificateCache, 0, out, curIndex, _certificateCache.length);
curIndex += _certificateCache.length;
System.arraycopy(_replyTag.getData(), 0, out, curIndex, SessionTag.BYTE_LENGTH);
curIndex += SessionTag.BYTE_LENGTH;
System.arraycopy(_replyKey.getData(), 0, out, curIndex, SessionKey.KEYSIZE_BYTES);
curIndex += SessionKey.KEYSIZE_BYTES;
id = DataHelper.toLong(4, _replyTunnel.getTunnelId());
System.arraycopy(id, 0, out, curIndex, 4);
curIndex += 4;
System.arraycopy(_replyPeer.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
return curIndex;
System.arraycopy(_layerKey.getData(), 0, data, offset, SessionKey.KEYSIZE_BYTES);
offset += SessionKey.KEYSIZE_BYTES;
System.arraycopy(_ivKey.getData(), 0, data, offset, SessionKey.KEYSIZE_BYTES);
offset += SessionKey.KEYSIZE_BYTES;
if (_optionsCache == null)
_optionsCache = DataHelper.toProperties(_options);
System.arraycopy(_optionsCache, 0, data, offset, _optionsCache.length);
offset += _optionsCache.length;
System.arraycopy(_replyGateway.getData(), 0, data, offset, Hash.HASH_LENGTH);
offset += Hash.HASH_LENGTH;
DataHelper.toLong(data, offset, 4, _replyTunnel.getTunnelId());
offset += 4;
System.arraycopy(_replyTag.getData(), 0, data, offset, SessionTag.BYTE_LENGTH);
offset += SessionTag.BYTE_LENGTH;
System.arraycopy(_replyKey.getData(), 0, data, offset, SessionKey.KEYSIZE_BYTES);
offset += SessionKey.KEYSIZE_BYTES;
DataHelper.toLong(data, offset, 4, _nonce);
offset += 4;
if (_certificateCache == null)
_certificateCache = _certificate.toByteArray();
System.arraycopy(_certificateCache, 0, data, offset, _certificateCache.length);
offset += _certificateCache.length;
DataHelper.toBoolean(data, offset, _isGateway);
offset += DataHelper.BOOLEAN_LENGTH;
return offset;
}
private boolean flagsIncludeDummy(long flags) {
return (0 != (flags & FLAG_DUMMY));
}
private boolean flagsReorder(long flags) {
return (0 != (flags & FLAG_REORDER));
}
private long getFlags() {
long val = 0L;
if (getIncludeDummyTraffic())
val = val | FLAG_DUMMY;
if (getReorderMessages())
val = val | FLAG_REORDER;
return val;
public byte[] toByteArray() {
byte rv[] = super.toByteArray();
if (rv == null)
throw new RuntimeException("unable to toByteArray(): " + toString());
return rv;
}
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return (int)(DataHelper.hashCode(getCertificate()) +
DataHelper.hashCode(getConfigurationKey()) +
DataHelper.hashCode(getNextRouter()) +
DataHelper.hashCode(getNextTunnelId()) +
DataHelper.hashCode(getReplyPeer()) +
DataHelper.hashCode(getReplyTunnel()) +
DataHelper.hashCode(getTunnelId()) +
DataHelper.hashCode(getTunnelKey()) +
DataHelper.hashCode(getVerificationPrivateKey()) +
DataHelper.hashCode(getVerificationPublicKey()) +
(getIncludeDummyTraffic() ? 1 : 0) +
getMaxAvgBytesPerMin() +
getMaxAvgMessagesPerMin() +
getMaxPeakBytesPerMin() +
getMaxPeakMessagesPerMin() +
getParticipantType() +
(getReorderMessages() ? 1 : 0) +
getTunnelDurationSeconds());
return DataHelper.hashCode(getNextRouter()) +
DataHelper.hashCode(getNextTunnelId()) +
DataHelper.hashCode(getReplyGateway()) +
DataHelper.hashCode(getReplyTunnel());
}
public boolean equals(Object object) {
if ( (object != null) && (object instanceof TunnelCreateMessage) ) {
TunnelCreateMessage msg = (TunnelCreateMessage)object;
return DataHelper.eq(getCertificate(), msg.getCertificate()) &&
DataHelper.eq(getConfigurationKey(), msg.getConfigurationKey()) &&
DataHelper.eq(getNextRouter(), msg.getNextRouter()) &&
DataHelper.eq(getNextTunnelId(), msg.getNextTunnelId()) &&
DataHelper.eq(getReplyTag(), msg.getReplyTag()) &&
DataHelper.eq(getReplyKey(), msg.getReplyKey()) &&
DataHelper.eq(getReplyTunnel(), msg.getReplyTunnel()) &&
DataHelper.eq(getReplyPeer(), msg.getReplyPeer()) &&
DataHelper.eq(getTunnelId(), msg.getTunnelId()) &&
DataHelper.eq(getTunnelKey(), msg.getTunnelKey()) &&
DataHelper.eq(getVerificationPrivateKey(), msg.getVerificationPrivateKey()) &&
DataHelper.eq(getVerificationPublicKey(), msg.getVerificationPublicKey()) &&
(getIncludeDummyTraffic() == msg.getIncludeDummyTraffic()) &&
(getMaxAvgBytesPerMin() == msg.getMaxAvgBytesPerMin()) &&
(getMaxAvgMessagesPerMin() == msg.getMaxAvgMessagesPerMin()) &&
(getMaxPeakBytesPerMin() == msg.getMaxPeakBytesPerMin()) &&
(getMaxPeakMessagesPerMin() == msg.getMaxPeakMessagesPerMin()) &&
(getParticipantType() == msg.getParticipantType()) &&
(getReorderMessages() == msg.getReorderMessages()) &&
(getTunnelDurationSeconds() == msg.getTunnelDurationSeconds());
return DataHelper.eq(getNextRouter(), msg.getNextRouter()) &&
DataHelper.eq(getNextTunnelId(), msg.getNextTunnelId()) &&
DataHelper.eq(getReplyGateway(), msg.getReplyGateway()) &&
DataHelper.eq(getReplyTunnel(), msg.getReplyTunnel());
} else {
return false;
}
@ -384,28 +263,13 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[TunnelCreateMessage: ");
buf.append("\n\tParticipant Type: ").append(getParticipantType());
buf.append("\n\tCertificate: ").append(getCertificate());
buf.append("\n\tConfiguration Key: ").append(getConfigurationKey());
buf.append("\n\tNext Router: ").append(getNextRouter());
buf.append("\n\tNext Tunnel: ").append(getNextTunnelId());
buf.append("\n\tReply Tag: ").append(getReplyTag());
buf.append("\n\tReply Key: ").append(getReplyKey());
buf.append("\n\tReply Tunnel: ").append(getReplyTunnel());
buf.append("\n\tReply Peer: ").append(getReplyPeer());
buf.append("\n\tTunnel ID: ").append(getTunnelId());
buf.append("\n\tTunnel Key: ").append(getTunnelKey());
buf.append("\n\tVerification Private Key: ").append(getVerificationPrivateKey());
buf.append("\n\tVerification Public Key: ").append(getVerificationPublicKey());
buf.append("\n\tInclude Dummy Traffic: ").append(getIncludeDummyTraffic());
buf.append("\n\tMax Avg Bytes / Minute: ").append(getMaxAvgBytesPerMin());
buf.append("\n\tMax Peak Bytes / Minute: ").append(getMaxPeakBytesPerMin());
buf.append("\n\tMax Avg Messages / Minute: ").append(getMaxAvgMessagesPerMin());
buf.append("\n\tMax Peak Messages / Minute: ").append(getMaxPeakMessagesPerMin());
buf.append("\n\tReorder Messages: ").append(getReorderMessages());
buf.append("\n\tTunnel Duration (seconds): ").append(getTunnelDurationSeconds());
buf.append("\n\tReply Peer: ").append(getReplyGateway());
buf.append("]");
return buf.toString();
}
public int getType() { return MESSAGE_TYPE; }
}

View File

@ -28,26 +28,22 @@ import net.i2p.util.Log;
public class TunnelCreateStatusMessage extends I2NPMessageImpl {
private final static Log _log = new Log(TunnelCreateStatusMessage.class);
public final static int MESSAGE_TYPE = 7;
private TunnelId _tunnelId;
private TunnelId _receiveTunnelId;
private int _status;
private Hash _from;
private long _nonce;
public final static int STATUS_SUCCESS = 0;
public final static int STATUS_FAILED_DUPLICATE_ID = 1;
public final static int STATUS_FAILED_OVERLOADED = 2;
public final static int STATUS_FAILED_CERTIFICATE = 3;
public final static int STATUS_FAILED_DELETED = 100;
public TunnelCreateStatusMessage(I2PAppContext context) {
super(context);
setTunnelId(null);
setReceiveTunnelId(null);
setStatus(-1);
setFromHash(null);
setNonce(-1);
}
public TunnelId getTunnelId() { return _tunnelId; }
public void setTunnelId(TunnelId id) {
_tunnelId = id;
public TunnelId getReceiveTunnelId() { return _receiveTunnelId; }
public void setReceiveTunnelId(TunnelId id) {
_receiveTunnelId = id;
if ( (id != null) && (id.getTunnelId() <= 0) )
throw new IllegalArgumentException("wtf, tunnelId " + id);
}
@ -55,63 +51,57 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl {
public int getStatus() { return _status; }
public void setStatus(int status) { _status = status; }
/**
* Contains the SHA256 Hash of the RouterIdentity sending the message
*/
public Hash getFromHash() { return _from; }
public void setFromHash(Hash from) { _from = from; }
public long getNonce() { return _nonce; }
public void setNonce(long nonce) { _nonce = nonce; }
public void readMessage(byte data[], int offset, int dataSize, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
int curIndex = offset;
_tunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
_receiveTunnelId = new TunnelId(DataHelper.fromLong(data, curIndex, 4));
curIndex += 4;
if (_tunnelId.getTunnelId() <= 0)
throw new I2NPMessageException("wtf, negative tunnelId? " + _tunnelId);
if (_receiveTunnelId.getTunnelId() <= 0)
throw new I2NPMessageException("wtf, negative tunnelId? " + _receiveTunnelId);
_status = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++;
byte peer[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
_from = new Hash(peer);
_nonce = DataHelper.fromLong(data, curIndex, 4);
}
/** calculate the message body's length (not including the header and footer */
protected int calculateWrittenLength() {
return 4 + 1 + Hash.HASH_LENGTH; // id + status + from
return 4 + 1 + 4; // id + status + nonce
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if ( (_tunnelId == null) || (_from == null) ) throw new I2NPMessageException("Not enough data to write out");
if (_tunnelId.getTunnelId() < 0) throw new I2NPMessageException("Negative tunnelId!? " + _tunnelId);
if ( (_receiveTunnelId == null) || (_nonce <= 0) ) throw new I2NPMessageException("Not enough data to write out");
if (_receiveTunnelId.getTunnelId() <= 0) throw new I2NPMessageException("Invalid tunnelId!? " + _receiveTunnelId);
byte id[] = DataHelper.toLong(4, _tunnelId.getTunnelId());
System.arraycopy(id, 0, out, curIndex, 4);
DataHelper.toLong(out, curIndex, 4, _receiveTunnelId.getTunnelId());
curIndex += 4;
DataHelper.toLong(out, curIndex, 1, _status);
curIndex++;
DataHelper.toLong(out, curIndex, 4, _nonce);
curIndex += 4;
byte status[] = DataHelper.toLong(1, _status);
out[curIndex++] = status[0];
System.arraycopy(_from.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
return curIndex;
}
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return DataHelper.hashCode(getTunnelId()) +
return DataHelper.hashCode(getReceiveTunnelId()) +
getStatus() +
DataHelper.hashCode(getFromHash());
(int)getNonce();
}
public boolean equals(Object object) {
if ( (object != null) && (object instanceof TunnelCreateStatusMessage) ) {
TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)object;
return DataHelper.eq(getTunnelId(),msg.getTunnelId()) &&
DataHelper.eq(getFromHash(),msg.getFromHash()) &&
return DataHelper.eq(getReceiveTunnelId(),msg.getReceiveTunnelId()) &&
DataHelper.eq(getNonce(),msg.getNonce()) &&
(getStatus() == msg.getStatus());
} else {
return false;
@ -121,9 +111,9 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl {
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[TunnelCreateStatusMessage: ");
buf.append("\n\tTunnel ID: ").append(getTunnelId());
buf.append("\n\tTunnel ID: ").append(getReceiveTunnelId());
buf.append("\n\tStatus: ").append(getStatus());
buf.append("\n\tFrom: ").append(getFromHash());
buf.append("\n\tNonce: ").append(getNonce());
buf.append("]");
return buf.toString();
}

View File

@ -10,6 +10,8 @@ package net.i2p.router;
import java.io.IOException;
import java.io.Writer;
import java.util.Collections;
import java.util.Set;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
@ -38,6 +40,9 @@ public abstract class ClientManagerFacade implements Service {
* @param onFailedJob Job to run after the timeout passes without receiving authorization
*/
public abstract void requestLeaseSet(Destination dest, LeaseSet set, long timeout, Job onCreateJob, Job onFailedJob);
public abstract void requestLeaseSet(Hash dest, LeaseSet set);
/**
* Instruct the client (or all clients) that they are under attack. This call
* does not block.
@ -67,6 +72,14 @@ public abstract class ClientManagerFacade implements Service {
public boolean verifyClientLiveliness() { return true; }
/**
* Return the list of locally connected clients
*
* @return set of Destination objects
*/
public Set listClients() { return Collections.EMPTY_SET; }
/**
* Return the client's current config, or null if not connected
*
@ -96,4 +109,7 @@ class DummyClientManagerFacade extends ClientManagerFacade {
public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) {}
public SessionConfig getClientSessionConfig(Destination _dest) { return null; }
public void requestLeaseSet(Hash dest, LeaseSet set) {}
}

View File

@ -12,7 +12,6 @@ import java.util.Properties;
import net.i2p.client.I2PClient;
import net.i2p.router.message.OutboundClientMessageJob;
import net.i2p.router.message.OutboundClientMessageOneShotJob;
import net.i2p.util.Log;
@ -59,10 +58,7 @@ public class ClientMessagePool {
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Adding message for remote delivery");
if (isGuaranteed(msg))
_context.jobQueue().addJob(new OutboundClientMessageJob(_context, msg));
else
_context.jobQueue().addJob(new OutboundClientMessageOneShotJob(_context, msg));
_context.jobQueue().addJob(new OutboundClientMessageOneShotJob(_context, msg));
}
}

View File

@ -16,160 +16,46 @@ import java.util.Properties;
*
*/
public class ClientTunnelSettings {
private int _numInbound;
private int _numOutbound;
private int _depthInbound;
private int _depthOutbound;
private long _msgsPerMinuteAvgInbound;
private long _bytesPerMinuteAvgInbound;
private long _msgsPerMinutePeakInbound;
private long _bytesPerMinutePeakInbound;
private boolean _includeDummyInbound;
private boolean _includeDummyOutbound;
private boolean _reorderInbound;
private boolean _reorderOutbound;
private long _inboundDuration;
private boolean _enforceStrictMinimumLength;
public final static String PROP_NUM_INBOUND = "tunnels.numInbound";
public final static String PROP_NUM_OUTBOUND = "tunnels.numOutbound";
public final static String PROP_DEPTH_INBOUND = "tunnels.depthInbound";
public final static String PROP_DEPTH_OUTBOUND = "tunnels.depthOutbound";
public final static String PROP_MSGS_AVG = "tunnels.messagesPerMinuteAverage";
public final static String PROP_MSGS_PEAK = "tunnels.messagesPerMinutePeak";
public final static String PROP_BYTES_AVG = "tunnels.bytesPerMinuteAverage";
public final static String PROP_BYTES_PEAK = "tunnels.bytesPerMinutePeak";
public final static String PROP_DUMMY_INBOUND = "tunnels.includeDummyTrafficInbound";
public final static String PROP_DUMMY_OUTBOUND = "tunnels.includeDummyTrafficOutbound";
public final static String PROP_REORDER_INBOUND = "tunnels.reorderInboundMessages";
public final static String PROP_REORDER_OUTBOUND = "tunnels.reoderOutboundMessages";
public final static String PROP_DURATION = "tunnels.tunnelDuration";
/**
* if tunnels.strictMinimumLength=true then never accept a tunnel shorter than the client's
* request, otherwise we'll try to meet that minimum, but if we don't have any that length,
* we'll accept the longest we do have.
*
*/
public final static String PROP_STRICT_MINIMUM_LENGTH = "tunnels.enforceStrictMinimumLength";
public final static int DEFAULT_NUM_INBOUND = 2;
public final static int DEFAULT_NUM_OUTBOUND = 1;
public final static int DEFAULT_DEPTH_INBOUND = 2;
public final static int DEFAULT_DEPTH_OUTBOUND = 2;
public final static long DEFAULT_MSGS_AVG = 0;
public final static long DEFAULT_MSGS_PEAK = 0;
public final static long DEFAULT_BYTES_AVG = 0;
public final static long DEFAULT_BYTES_PEAK = 0;
public final static boolean DEFAULT_DUMMY_INBOUND = false;
public final static boolean DEFAULT_DUMMY_OUTBOUND = false;
public final static boolean DEFAULT_REORDER_INBOUND = false;
public final static boolean DEFAULT_REORDER_OUTBOUND = false;
public final static long DEFAULT_DURATION = 10*60*1000;
public final static boolean DEFAULT_STRICT_MINIMUM_LENGTH = true;
private TunnelPoolSettings _inboundSettings;
private TunnelPoolSettings _outboundSettings;
public ClientTunnelSettings() {
_numInbound = DEFAULT_NUM_INBOUND;
_numOutbound = DEFAULT_NUM_OUTBOUND;
_depthInbound = DEFAULT_DEPTH_INBOUND;
_depthOutbound = DEFAULT_DEPTH_OUTBOUND;
_msgsPerMinuteAvgInbound = DEFAULT_MSGS_AVG;
_bytesPerMinuteAvgInbound = DEFAULT_BYTES_AVG;
_msgsPerMinutePeakInbound = DEFAULT_MSGS_PEAK;
_bytesPerMinutePeakInbound = DEFAULT_BYTES_PEAK;
_includeDummyInbound = DEFAULT_DUMMY_INBOUND;
_includeDummyOutbound = DEFAULT_DUMMY_OUTBOUND;
_reorderInbound = DEFAULT_REORDER_INBOUND;
_reorderOutbound = DEFAULT_REORDER_OUTBOUND;
_inboundDuration = DEFAULT_DURATION;
_enforceStrictMinimumLength = DEFAULT_STRICT_MINIMUM_LENGTH;
_inboundSettings = new TunnelPoolSettings();
_inboundSettings.setIsInbound(true);
_inboundSettings.setIsExploratory(false);
_outboundSettings = new TunnelPoolSettings();
_outboundSettings.setIsInbound(false);
_outboundSettings.setIsExploratory(false);
}
public int getNumInboundTunnels() { return _numInbound; }
public int getNumOutboundTunnels() { return _numOutbound; }
public int getDepthInbound() { return _depthInbound; }
public int getDepthOutbound() { return _depthOutbound; }
public long getMessagesPerMinuteInboundAverage() { return _msgsPerMinuteAvgInbound; }
public long getMessagesPerMinuteInboundPeak() { return _msgsPerMinutePeakInbound; }
public long getBytesPerMinuteInboundAverage() { return _bytesPerMinuteAvgInbound; }
public long getBytesPerMinuteInboundPeak() { return _bytesPerMinutePeakInbound; }
public boolean getIncludeDummyInbound() { return _includeDummyInbound; }
public boolean getIncludeDummyOutbound() { return _includeDummyOutbound; }
public boolean getReorderInbound() { return _reorderInbound; }
public boolean getReorderOutbound() { return _reorderOutbound; }
public long getInboundDuration() { return _inboundDuration; }
public boolean getEnforceStrictMinimumLength() { return _enforceStrictMinimumLength; }
public void setNumInboundTunnels(int num) { _numInbound = num; }
public void setNumOutboundTunnels(int num) { _numOutbound = num; }
public void setEnforceStrictMinimumLength(boolean enforce) { _enforceStrictMinimumLength = enforce; }
public TunnelPoolSettings getInboundSettings() { return _inboundSettings; }
public void setInboundSettings(TunnelPoolSettings settings) { _inboundSettings = settings; }
public TunnelPoolSettings getOutboundSettings() { return _outboundSettings; }
public void setOutboundSettings(TunnelPoolSettings settings) { _outboundSettings = settings; }
public void readFromProperties(Properties props) {
_numInbound = getInt(props.getProperty(PROP_NUM_INBOUND), DEFAULT_NUM_INBOUND);
_numOutbound = getInt(props.getProperty(PROP_NUM_OUTBOUND), DEFAULT_NUM_OUTBOUND);
_depthInbound = getInt(props.getProperty(PROP_DEPTH_INBOUND), DEFAULT_DEPTH_INBOUND);
_depthOutbound = getInt(props.getProperty(PROP_DEPTH_OUTBOUND), DEFAULT_DEPTH_OUTBOUND);
_msgsPerMinuteAvgInbound = getLong(props.getProperty(PROP_MSGS_AVG), DEFAULT_MSGS_AVG);
_bytesPerMinuteAvgInbound = getLong(props.getProperty(PROP_MSGS_PEAK), DEFAULT_BYTES_AVG);
_msgsPerMinutePeakInbound = getLong(props.getProperty(PROP_BYTES_AVG), DEFAULT_MSGS_PEAK);
_bytesPerMinutePeakInbound = getLong(props.getProperty(PROP_BYTES_PEAK), DEFAULT_BYTES_PEAK);
_includeDummyInbound = getBoolean(props.getProperty(PROP_DUMMY_INBOUND), DEFAULT_DUMMY_INBOUND);
_includeDummyOutbound = getBoolean(props.getProperty(PROP_DUMMY_OUTBOUND), DEFAULT_DUMMY_OUTBOUND);
_reorderInbound = getBoolean(props.getProperty(PROP_REORDER_INBOUND), DEFAULT_REORDER_INBOUND);
_reorderOutbound = getBoolean(props.getProperty(PROP_REORDER_OUTBOUND), DEFAULT_REORDER_OUTBOUND);
_inboundDuration = getLong(props.getProperty(PROP_DURATION), DEFAULT_DURATION);
_enforceStrictMinimumLength = getBoolean(props.getProperty(PROP_STRICT_MINIMUM_LENGTH), DEFAULT_STRICT_MINIMUM_LENGTH);
}
_inboundSettings.readFromProperties("inbound.", props);
_outboundSettings.readFromProperties("outbound.", props);
}
public void writeToProperties(Properties props) {
if (props == null) return;
props.setProperty(PROP_NUM_INBOUND, ""+_numInbound);
props.setProperty(PROP_NUM_OUTBOUND, ""+_numOutbound);
props.setProperty(PROP_DEPTH_INBOUND, ""+_depthInbound);
props.setProperty(PROP_DEPTH_OUTBOUND, ""+_depthOutbound);
props.setProperty(PROP_MSGS_AVG, ""+_msgsPerMinuteAvgInbound);
props.setProperty(PROP_MSGS_PEAK, ""+_msgsPerMinutePeakInbound);
props.setProperty(PROP_BYTES_AVG, ""+_bytesPerMinuteAvgInbound);
props.setProperty(PROP_BYTES_PEAK, ""+_bytesPerMinutePeakInbound);
props.setProperty(PROP_DUMMY_INBOUND, (_includeDummyInbound ? Boolean.TRUE.toString() : Boolean.FALSE.toString()));
props.setProperty(PROP_DUMMY_OUTBOUND, (_includeDummyOutbound ? Boolean.TRUE.toString() : Boolean.FALSE.toString()));
props.setProperty(PROP_REORDER_INBOUND, (_reorderInbound ? Boolean.TRUE.toString() : Boolean.FALSE.toString()));
props.setProperty(PROP_REORDER_OUTBOUND, (_reorderOutbound ? Boolean.TRUE.toString() : Boolean.FALSE.toString()));
props.setProperty(PROP_DURATION, ""+_inboundDuration);
props.setProperty(PROP_STRICT_MINIMUM_LENGTH, (_enforceStrictMinimumLength ? Boolean.TRUE.toString() : Boolean.FALSE.toString()));
if (props == null) return;
_inboundSettings.writeToProperties("inbound.", props);
_outboundSettings.writeToProperties("outbound.", props);
}
public String toString() {
StringBuffer buf = new StringBuffer();
Properties p = new Properties();
writeToProperties(p);
buf.append("Client tunnel settings:\n");
buf.append("====================================\n");
for (Iterator iter = p.keySet().iterator(); iter.hasNext(); ) {
String name = (String)iter.next();
String val = p.getProperty(name);
buf.append(name).append(" = [").append(val).append("]\n");
}
buf.append("====================================\n");
return buf.toString();
}
////
////
private static final boolean getBoolean(String str, boolean defaultValue) {
if (str == null) return defaultValue;
String s = str.toUpperCase();
boolean v = "TRUE".equals(s) || "YES".equals(s);
return v;
}
private static final int getInt(String str, int defaultValue) { return (int)getLong(str, defaultValue); }
private static final long getLong(String str, long defaultValue) {
if (str == null) return defaultValue;
try {
long val = Long.parseLong(str);
return val;
} catch (NumberFormatException nfe) {
return defaultValue;
}
StringBuffer buf = new StringBuffer();
Properties p = new Properties();
writeToProperties(p);
buf.append("Client tunnel settings:\n");
buf.append("====================================\n");
for (Iterator iter = p.keySet().iterator(); iter.hasNext(); ) {
String name = (String)iter.next();
String val = p.getProperty(name);
buf.append(name).append(" = [").append(val).append("]\n");
}
buf.append("====================================\n");
return buf.toString();
}
}

View File

@ -8,14 +8,22 @@ package net.i2p.router;
*
*/
import java.io.Writer;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.TunnelCreateStatusMessage;
import net.i2p.data.i2np.TunnelDataMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
/**
@ -24,20 +32,46 @@ import net.i2p.util.Log;
* periodically retrieve them for processing.
*
*/
public class InNetMessagePool {
public class InNetMessagePool implements Service {
private Log _log;
private RouterContext _context;
private List _messages;
private Map _handlerJobBuilders;
private List _pendingDataMessages;
private List _pendingDataMessagesFrom;
private List _pendingGatewayMessages;
private SharedShortCircuitDataJob _shortCircuitDataJob;
private SharedShortCircuitGatewayJob _shortCircuitGatewayJob;
private boolean _alive;
private boolean _dispatchThreaded;
/**
* If set to true, we will have two additional threads - one for dispatching
* tunnel data messages, and another for dispatching tunnel gateway messages.
* These will not use the JobQueue but will operate sequentially. Otherwise,
* if this is set to false, the messages will be queued up in the jobQueue,
* using the jobQueue's single thread.
*
*/
public static final String PROP_DISPATCH_THREADED = "router.dispatchThreaded";
public static final boolean DEFAULT_DISPATCH_THREADED = false;
public InNetMessagePool(RouterContext context) {
_context = context;
_messages = new ArrayList();
_handlerJobBuilders = new HashMap();
_pendingDataMessages = new ArrayList(16);
_pendingDataMessagesFrom = new ArrayList(16);
_pendingGatewayMessages = new ArrayList(16);
_shortCircuitDataJob = new SharedShortCircuitDataJob(context);
_shortCircuitGatewayJob = new SharedShortCircuitGatewayJob(context);
_log = _context.logManager().getLog(InNetMessagePool.class);
_alive = false;
_context.statManager().createRateStat("inNetPool.dropped", "How often do we drop a message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("inNetPool.droppedDeliveryStatusDelay", "How long after a delivery status message is created do we receive it back again (for messages that are too slow to be handled)", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("inNetPool.duplicate", "How often do we receive a duplicate message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("inNetPool.droppedTunnelCreateStatusMessage", "How often we drop a slow-to-arrive tunnel request response", "InNetPool", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("inNetPool.droppedDbLookupResponseMessage", "How often we drop a slow-to-arrive db search response", "InNetPool", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("pool.dispatchDataTime", "How long a tunnel dispatch takes", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("pool.dispatchGatewayTime", "How long a tunnel gateway dispatch takes", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
}
public HandlerJobBuilder registerHandlerJobBuilder(int i2npMessageType, HandlerJobBuilder builder) {
@ -56,10 +90,8 @@ public class InNetMessagePool {
* (though if the builder doesn't create a job, it is added to the pool)
*
*/
public int add(InNetMessage msg) {
I2NPMessage messageBody = msg.getMessage();
msg.processingComplete();
Date exp = messageBody.getMessageExpiration();
public int add(I2NPMessage messageBody, RouterIdentity fromRouter, Hash fromRouterHash) {
long exp = messageBody.getMessageExpiration();
if (_log.shouldLog(Log.INFO))
_log.info("Received inbound "
@ -67,131 +99,298 @@ public class InNetMessagePool {
+ " expiring on " + exp
+ " of type " + messageBody.getClass().getName());
boolean valid = _context.messageValidator().validateMessage(messageBody.getUniqueId(), exp.getTime());
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate message received [" + messageBody.getUniqueId()
+ " expiring on " + exp + "]: " + messageBody.getClass().getName());
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
_context.statManager().addRateData("inNetPool.duplicate", 1, 0);
_context.messageHistory().droppedOtherMessage(messageBody);
_context.messageHistory().messageProcessingError(messageBody.getUniqueId(),
messageBody.getClass().getName(),
"Duplicate/expired");
return -1;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Message received [" + messageBody.getUniqueId()
+ " expiring on " + exp + "] is NOT a duplicate or exipired");
}
int size = -1;
int type = messageBody.getType();
HandlerJobBuilder builder = (HandlerJobBuilder)_handlerJobBuilders.get(new Integer(type));
if (_log.shouldLog(Log.DEBUG))
_log.debug("Add message to the inNetMessage pool - builder: " + builder
+ " message class: " + messageBody.getClass().getName());
if (builder != null) {
Job job = builder.createJob(messageBody, msg.getFromRouter(),
msg.getFromRouterHash());
if (job != null) {
_context.jobQueue().addJob(job);
synchronized (_messages) {
size = _messages.size();
}
}
}
List origMessages = _context.messageRegistry().getOriginalMessages(messageBody);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Original messages for inbound message: " + origMessages.size());
if (origMessages.size() > 1) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Orig: " + origMessages + " \nthe above are replies for: " + msg,
new Exception("Multiple matches"));
}
for (int i = 0; i < origMessages.size(); i++) {
OutNetMessage omsg = (OutNetMessage)origMessages.get(i);
ReplyJob job = omsg.getOnReplyJob();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Original message [" + i + "] " + omsg.getReplySelector()
+ " : " + omsg + ": reply job: " + job);
if (job != null) {
job.setMessage(messageBody);
_context.jobQueue().addJob(job);
}
}
if (origMessages.size() <= 0) {
// not handled as a reply
if (size == -1) {
// was not handled via HandlerJobBuilder
if (messageBody instanceof TunnelDataMessage) {
// do not validate the message with the validator - the IV validator is sufficient
} else {
boolean valid = _context.messageValidator().validateMessage(messageBody.getUniqueId(), exp);
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate message received [" + messageBody.getUniqueId()
+ " expiring on " + exp + "]: " + messageBody.getClass().getName());
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
_context.statManager().addRateData("inNetPool.duplicate", 1, 0);
_context.messageHistory().droppedOtherMessage(messageBody);
if (type == DeliveryStatusMessage.MESSAGE_TYPE) {
long timeSinceSent = _context.clock().now() -
((DeliveryStatusMessage)messageBody).getArrival().getTime();
if (_log.shouldLog(Log.INFO))
_log.info("Dropping unhandled delivery status message created " + timeSinceSent + "ms ago: " + msg);
_context.statManager().addRateData("inNetPool.droppedDeliveryStatusDelay", timeSinceSent, timeSinceSent);
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Message " + messageBody + " expiring on "
+ (messageBody != null ? (messageBody.getMessageExpiration()+"") : " [unknown]")
+ " was not handled by a HandlerJobBuilder - DROPPING: "
+ msg, new Exception("DROPPED MESSAGE"));
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
}
_context.messageHistory().messageProcessingError(messageBody.getUniqueId(),
messageBody.getClass().getName(),
"Duplicate/expired");
return -1;
} else {
String mtype = messageBody.getClass().getName();
_context.messageHistory().receiveMessage(mtype, messageBody.getUniqueId(),
messageBody.getMessageExpiration(),
msg.getFromRouterHash(), true);
return size;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Message received [" + messageBody.getUniqueId()
+ " expiring on " + exp + "] is NOT a duplicate or exipired");
}
}
boolean jobFound = false;
int type = messageBody.getType();
boolean allowMatches = true;
if (messageBody instanceof TunnelGatewayMessage) {
shortCircuitTunnelGateway(messageBody);
allowMatches = false;
} else if (messageBody instanceof TunnelDataMessage) {
shortCircuitTunnelData(messageBody, fromRouterHash);
allowMatches = false;
} else {
HandlerJobBuilder builder = (HandlerJobBuilder)_handlerJobBuilders.get(new Integer(type));
if (_log.shouldLog(Log.DEBUG))
_log.debug("Add message to the inNetMessage pool - builder: " + builder
+ " message class: " + messageBody.getClass().getName());
if (builder != null) {
Job job = builder.createJob(messageBody, fromRouter,
fromRouterHash);
if (job != null) {
_context.jobQueue().addJob(job);
jobFound = true;
}
}
}
if (allowMatches) {
List origMessages = _context.messageRegistry().getOriginalMessages(messageBody);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Original messages for inbound message: " + origMessages.size());
if (origMessages.size() > 1) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Orig: " + origMessages + " \nthe above are replies for: " + messageBody,
new Exception("Multiple matches"));
}
for (int i = 0; i < origMessages.size(); i++) {
OutNetMessage omsg = (OutNetMessage)origMessages.get(i);
ReplyJob job = omsg.getOnReplyJob();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Original message [" + i + "] " + omsg.getReplySelector()
+ " : " + omsg + ": reply job: " + job);
if (job != null) {
job.setMessage(messageBody);
_context.jobQueue().addJob(job);
}
}
if (origMessages.size() <= 0) {
// not handled as a reply
if (!jobFound) {
// was not handled via HandlerJobBuilder
_context.messageHistory().droppedOtherMessage(messageBody);
if (type == DeliveryStatusMessage.MESSAGE_TYPE) {
long timeSinceSent = _context.clock().now() -
((DeliveryStatusMessage)messageBody).getArrival();
if (_log.shouldLog(Log.WARN))
_log.warn("Dropping unhandled delivery status message created " + timeSinceSent + "ms ago: " + messageBody);
_context.statManager().addRateData("inNetPool.droppedDeliveryStatusDelay", timeSinceSent, timeSinceSent);
} else if (type == TunnelCreateStatusMessage.MESSAGE_TYPE) {
if (_log.shouldLog(Log.INFO))
_log.info("Dropping slow tunnel create request response: " + messageBody);
_context.statManager().addRateData("inNetPool.droppedTunnelCreateStatusMessage", 1, 0);
} else if (type == DatabaseSearchReplyMessage.MESSAGE_TYPE) {
if (_log.shouldLog(Log.INFO))
_log.info("Dropping slow db lookup response: " + messageBody);
_context.statManager().addRateData("inNetPool.droppedDbLookupResponseMessage", 1, 0);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Message " + messageBody + " expiring on "
+ (messageBody != null ? (messageBody.getMessageExpiration()+"") : " [unknown]")
+ " was not handled by a HandlerJobBuilder - DROPPING: "
+ messageBody, new Exception("DROPPED MESSAGE"));
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
}
} else {
String mtype = messageBody.getClass().getName();
_context.messageHistory().receiveMessage(mtype, messageBody.getUniqueId(),
messageBody.getMessageExpiration(),
fromRouterHash, true);
return 0; // no queue
}
}
}
String mtype = messageBody.getClass().getName();
_context.messageHistory().receiveMessage(mtype, messageBody.getUniqueId(),
messageBody.getMessageExpiration(),
msg.getFromRouterHash(), true);
return size;
fromRouterHash, true);
return 0; // no queue
}
// NOTE(review): this region is corrupted by a bad merge/scrape - the body of
// shortCircuitTunnelGateway(...) has been spliced into the middle of
// getNext(int), and getNext's "return msgs;" / closing brace are tangled with
// it.  Restore both methods from revision history; do not trust this text as-is.
/**
* Remove up to maxNumMessages InNetMessages from the pool and return them.
*
*/
public List getNext(int maxNumMessages) {
ArrayList msgs = new ArrayList(maxNumMessages);
synchronized (_messages) {
for (int i = 0; (i < maxNumMessages) && (_messages.size() > 0); i++)
msgs.add(_messages.remove(0));
// the following short circuits the tunnel dispatching - i'm not sure whether
// we'll want to run the dispatching in jobs or whether it should go inline with
// others and/or on other threads (e.g. transport threads). lets try 'em both.
private void shortCircuitTunnelGateway(I2NPMessage messageBody) {
if (false) { // dead dev toggle: flip to dispatch inline rather than queueing
doShortCircuitTunnelGateway(messageBody);
} else {
synchronized (_pendingGatewayMessages) {
_pendingGatewayMessages.add(messageBody);
_pendingGatewayMessages.notifyAll(); // wake the threaded dispatcher, if running
}
// when not running the dedicated dispatcher thread, drain via a shared job
if (!_dispatchThreaded)
_context.jobQueue().addJob(_shortCircuitGatewayJob);
}
return msgs;
}
/**
 * Hand a tunnel gateway message directly to the tunnel dispatcher,
 * bypassing the job queue, and record how long the dispatch took.
 *
 * @param messageBody message to dispatch; must be a TunnelGatewayMessage
 */
private void doShortCircuitTunnelGateway(I2NPMessage messageBody) {
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Shortcut dispatch tunnelGateway message " + messageBody);
    long start = _context.clock().now();
    TunnelGatewayMessage gw = (TunnelGatewayMessage)messageBody;
    _context.tunnelDispatcher().dispatch(gw);
    long elapsed = _context.clock().now() - start;
    // record the dispatch latency both as the stat value and the event duration
    _context.statManager().addRateData("tunnel.dispatchGatewayTime", elapsed, elapsed);
}
// NOTE(review): corrupted region - the body of shortCircuitTunnelData(...) is
// spliced into getNext(), whose return statement and closing braces are missing
// here.  Restore from revision history before editing.
/**
* Retrieve the next message
*
*/
public InNetMessage getNext() {
synchronized (_messages) {
if (_messages.size() <= 0) return null;
return (InNetMessage)_messages.remove(0);
private void shortCircuitTunnelData(I2NPMessage messageBody, Hash from) {
if (false) { // dead dev toggle: flip to dispatch inline rather than queueing
doShortCircuitTunnelData(messageBody, from);
} else {
// the two lists are kept in lock-step: message at index i came from hash at index i
synchronized (_pendingDataMessages) {
_pendingDataMessages.add(messageBody);
_pendingDataMessagesFrom.add(from);
_pendingDataMessages.notifyAll(); // wake the threaded dispatcher, if running
//_context.jobQueue().addJob(new ShortCircuitDataJob(_context, messageBody, from));
}
// when not running the dedicated dispatcher thread, drain via a shared job
if (!_dispatchThreaded)
_context.jobQueue().addJob(_shortCircuitDataJob);
}
}
/**
 * Hand a tunnel data message straight to the tunnel dispatcher, bypassing
 * the job queue.
 *
 * @param messageBody message to dispatch; must be a TunnelDataMessage
 * @param from hash of the router the message arrived from
 */
private void doShortCircuitTunnelData(I2NPMessage messageBody, Hash from) {
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Shortcut dispatch tunnelData message " + messageBody);
    TunnelDataMessage data = (TunnelDataMessage)messageBody;
    _context.tunnelDispatcher().dispatch(data, from);
}
/** No status HTML is rendered for this component. */
public void renderStatusHTML(Writer out) {}
/**
 * Restart the pool: shut it down, pause briefly so the dispatcher threads
 * can wind down, then start it back up.
 */
public void restart() {
    shutdown();
    try {
        Thread.sleep(100); // brief grace period between shutdown and startup
    } catch (InterruptedException ie) {
        // interrupted during the pause - proceed with startup anyway
    }
    startup();
}
/**
 * Shut down the pool: mark it dead, drop any queued messages, and wake both
 * dispatcher threads so they can observe !_alive and exit.
 *
 * Fix: previously only the data-message queue was cleared and notified; a
 * threaded TunnelGatewayDispatcher parked in wait() on
 * _pendingGatewayMessages was never woken, so it lingered (holding any
 * queued gateway messages) until another message happened to arrive.
 */
public void shutdown() {
    _alive = false;
    synchronized (_pendingDataMessages) {
        _pendingDataMessages.clear();
        _pendingDataMessagesFrom.clear();
        _pendingDataMessages.notifyAll();
    }
    synchronized (_pendingGatewayMessages) {
        _pendingGatewayMessages.clear();
        _pendingGatewayMessages.notifyAll();
    }
}
// NOTE(review): corrupted region - getCount()'s closing braces are missing;
// the text of startup() was spliced in directly below.  Restore from revision
// history before editing.
/**
* Retrieve the size of the pool
*
*/
public int getCount() {
synchronized (_messages) {
return _messages.size();
/**
 * Bring the pool up: mark it alive, read the threaded-dispatch override
 * from the router configuration, and - if threaded dispatch is enabled -
 * spin up the two daemon dispatcher threads.
 */
public void startup() {
    _alive = true;
    String threadedStr = _context.getProperty(PROP_DISPATCH_THREADED);
    // config property overrides the compiled-in default when present
    _dispatchThreaded = (threadedStr != null)
                      ? Boolean.valueOf(threadedStr).booleanValue()
                      : DEFAULT_DISPATCH_THREADED;
    if (!_dispatchThreaded)
        return;
    I2PThread dataThread = new I2PThread(new TunnelDataDispatcher(), "Tunnel data dispatcher");
    dataThread.setDaemon(true);
    dataThread.start();
    I2PThread gatewayThread = new I2PThread(new TunnelGatewayDispatcher(), "Tunnel gateway dispatcher");
    gatewayThread.setDaemon(true);
    gatewayThread.start();
}
/**
 * Job-queue alternative to the dedicated data dispatcher thread: pulls one
 * pending tunnel data message (and its matching source hash) off the shared
 * queues, dispatches it, and requeues itself while more remain.
 */
private class SharedShortCircuitDataJob extends JobImpl {
    public SharedShortCircuitDataJob(RouterContext ctx) {
        super(ctx);
    }
    public String getName() { return "Dispatch tunnel participant message"; }
    public void runJob() {
        I2NPMessage pending = null;
        Hash sender = null;
        int left;
        // the two lists are kept in lock-step, so remove from both together
        synchronized (_pendingDataMessages) {
            if (!_pendingDataMessages.isEmpty()) {
                pending = (I2NPMessage)_pendingDataMessages.remove(0);
                sender = (Hash)_pendingDataMessagesFrom.remove(0);
            }
            left = _pendingDataMessages.size();
        }
        if (pending != null)
            doShortCircuitTunnelData(pending, sender);
        // one message per run; requeue ourselves to drain the rest
        if (left > 0)
            getContext().jobQueue().addJob(SharedShortCircuitDataJob.this);
    }
}
/**
 * Job-queue alternative to the dedicated gateway dispatcher thread: pulls
 * one pending tunnel gateway message off the shared queue, dispatches it,
 * and requeues itself while more remain.
 */
private class SharedShortCircuitGatewayJob extends JobImpl {
    public SharedShortCircuitGatewayJob(RouterContext ctx) {
        super(ctx);
    }
    public String getName() { return "Dispatch tunnel gateway message"; }
    public void runJob() {
        I2NPMessage pending = null;
        int left;
        synchronized (_pendingGatewayMessages) {
            if (!_pendingGatewayMessages.isEmpty())
                pending = (I2NPMessage)_pendingGatewayMessages.remove(0);
            left = _pendingGatewayMessages.size();
        }
        if (pending != null)
            doShortCircuitTunnelGateway(pending);
        // one message per run; requeue ourselves to drain the rest
        if (left > 0)
            getContext().jobQueue().addJob(SharedShortCircuitGatewayJob.this);
    }
}
/**
 * Daemon loop used when threaded dispatch is enabled: waits on the shared
 * gateway queue, dispatches each message inline, and records the dispatch
 * time.  Exits when _alive goes false (the queue must be notified for a
 * parked thread to re-check the flag).
 */
private class TunnelGatewayDispatcher implements Runnable {
public void run() {
while (_alive) {
I2NPMessage msg = null;
try {
synchronized (_pendingGatewayMessages) {
// 'if' rather than 'while': on a spurious wakeup msg stays null and
// the outer loop simply re-checks the queue on the next pass
if (_pendingGatewayMessages.size() <= 0)
_pendingGatewayMessages.wait();
else
msg = (I2NPMessage)_pendingGatewayMessages.remove(0);
}
if (msg != null) {
long before = _context.clock().now();
doShortCircuitTunnelGateway(msg);
long elapsed = _context.clock().now() - before;
_context.statManager().addRateData("pool.dispatchGatewayTime", elapsed, elapsed);
}
} catch (InterruptedException ie) {
// interrupted while waiting - loop around and re-check _alive
} catch (OutOfMemoryError oome) {
throw oome; // never swallow OOM
} catch (Exception e) {
// keep the dispatcher alive through per-message failures
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Error in the tunnel gateway dispatcher", e);
}
}
}
}
/**
 * Daemon loop used when threaded dispatch is enabled: waits on the shared
 * data queue, dispatches each message (with its source hash) inline, and
 * records the dispatch time.  Exits when _alive goes false (the queue must
 * be notified for a parked thread to re-check the flag).
 */
private class TunnelDataDispatcher implements Runnable {
public void run() {
while (_alive) {
I2NPMessage msg = null;
Hash from = null;
try {
synchronized (_pendingDataMessages) {
// 'if' rather than 'while': on a spurious wakeup msg stays null and
// the outer loop simply re-checks the queue on the next pass
if (_pendingDataMessages.size() <= 0) {
_pendingDataMessages.wait();
} else {
// the two lists are kept in lock-step; remove from both together
msg = (I2NPMessage)_pendingDataMessages.remove(0);
from = (Hash)_pendingDataMessagesFrom.remove(0);
}
}
if (msg != null) {
long before = _context.clock().now();
doShortCircuitTunnelData(msg, from);
long elapsed = _context.clock().now() - before;
_context.statManager().addRateData("pool.dispatchDataTime", elapsed, elapsed);
}
} catch (InterruptedException ie) {
// interrupted while waiting - loop around and re-check _alive
} catch (OutOfMemoryError oome) {
throw oome; // never swallow OOM
} catch (Exception e) {
// keep the dispatcher alive through per-message failures
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Error in the tunnel data dispatcher", e);
}
}
}
}
}

View File

@ -12,15 +12,13 @@ import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.SortedMap;
import java.util.TreeMap;
import net.i2p.data.DataHelper;
import net.i2p.router.networkdb.HandleDatabaseLookupMessageJob;
import net.i2p.router.tunnelmanager.HandleTunnelCreateMessageJob;
import net.i2p.router.tunnelmanager.RequestTunnelJob;
import net.i2p.util.Clock;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
@ -177,6 +175,13 @@ public class JobQueue {
return;
}
public void removeJob(Job job) {
synchronized (_jobLock) {
_readyJobs.remove(job);
_timedJobs.remove(job);
}
}
public void timingUpdated() {
synchronized (_jobLock) {
_jobLock.notifyAll();
@ -217,13 +222,6 @@ public class JobQueue {
if (cls == HandleDatabaseLookupMessageJob.class)
return true;
// tunnels are a bitch, but its dropped() builds a pair of fake ones just in case
if (cls == RequestTunnelJob.class)
return true;
// if we're already this loaded, dont take more tunnels
if (cls == HandleTunnelCreateMessageJob.class)
return true;
}
return false;
}
@ -624,7 +622,9 @@ public class JobQueue {
buf.append("# ready/waiting jobs: ").append(readyJobs.size()).append(" <i>(lots of these mean there's likely a big problem)</i><ol>\n");
for (int i = 0; i < readyJobs.size(); i++) {
Job j = (Job)readyJobs.get(i);
buf.append("<li> [waiting ").append(now-j.getTiming().getStartAfter()).append("ms]: ");
buf.append("<li> [waiting ");
buf.append(DataHelper.formatDuration(now-j.getTiming().getStartAfter()));
buf.append("]: ");
buf.append(j.toString()).append("</li>\n");
}
buf.append("</ol>\n");
@ -638,8 +638,9 @@ public class JobQueue {
}
for (Iterator iter = ordered.values().iterator(); iter.hasNext(); ) {
Job j = (Job)iter.next();
buf.append("<li>").append(j.getName()).append(" @ ");
buf.append(new Date(j.getTiming().getStartAfter())).append("</li>\n");
long time = j.getTiming().getStartAfter() - now;
buf.append("<li>").append(j.getName()).append(" in ");
buf.append(DataHelper.formatDuration(time)).append("</li>\n");
}
buf.append("</ol>\n");

View File

@ -20,6 +20,7 @@ import java.util.Set;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataStructure;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.PrivateKey;
import net.i2p.data.PublicKey;
import net.i2p.data.SigningPrivateKey;
@ -97,7 +98,7 @@ public class KeyManager {
_log.info("Registering keys for destination " + dest.calculateHash().toBase64());
LeaseSetKeys keys = new LeaseSetKeys(dest, leaseRevocationPrivateKey, endpointDecryptionKey);
synchronized (_leaseSetKeys) {
_leaseSetKeys.put(dest, keys);
_leaseSetKeys.put(dest.calculateHash(), keys);
}
if (dest != null)
queueWrite();
@ -116,7 +117,7 @@ public class KeyManager {
_log.info("Unregistering keys for destination " + dest.calculateHash().toBase64());
LeaseSetKeys rv = null;
synchronized (_leaseSetKeys) {
rv = (LeaseSetKeys)_leaseSetKeys.remove(dest);
rv = (LeaseSetKeys)_leaseSetKeys.remove(dest.calculateHash());
}
if (dest != null)
queueWrite();
@ -124,6 +125,9 @@ public class KeyManager {
}
public LeaseSetKeys getKeys(Destination dest) {
return getKeys(dest.calculateHash());
}
public LeaseSetKeys getKeys(Hash dest) {
synchronized (_leaseSetKeys) {
return (LeaseSetKeys)_leaseSetKeys.get(dest);
}

View File

@ -51,7 +51,7 @@ public class MessageHistory {
_fmt.setTimeZone(TimeZone.getTimeZone("GMT"));
_reinitializeJob = new ReinitializeJob();
_writeJob = new WriteJob();
_submitMessageHistoryJob = new SubmitMessageHistoryJob(_context);
//_submitMessageHistoryJob = new SubmitMessageHistoryJob(_context);
initialize(true);
}
@ -103,8 +103,8 @@ public class MessageHistory {
updateSettings();
addEntry(getPrefix() + "** Router initialized (started up or changed identities)");
_context.jobQueue().addJob(_writeJob);
_submitMessageHistoryJob.getTiming().setStartAfter(_context.clock().now() + 2*60*1000);
_context.jobQueue().addJob(_submitMessageHistoryJob);
//_submitMessageHistoryJob.getTiming().setStartAfter(_context.clock().now() + 2*60*1000);
//_context.jobQueue().addJob(_submitMessageHistoryJob);
}
}
@ -163,7 +163,7 @@ public class MessageHistory {
buf.append("receive tunnel create [").append(createTunnel.getTunnelId()).append("] ");
if (nextPeer != null)
buf.append("(next [").append(getName(nextPeer)).append("]) ");
buf.append("ok? ").append(ok).append(" expiring on [").append(getTime(expire)).append("]");
buf.append("ok? ").append(ok).append(" expiring on [").append(getTime(expire.getTime())).append("]");
addEntry(buf.toString());
}
@ -178,17 +178,7 @@ public class MessageHistory {
if (tunnel == null) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("joining tunnel [").append(tunnel.getTunnelId().getTunnelId()).append("] as [").append(state).append("] ");
buf.append(" (next: ");
TunnelInfo cur = tunnel;
while (cur.getNextHopInfo() != null) {
buf.append('[').append(getName(cur.getNextHopInfo().getThisHop()));
buf.append("], ");
cur = cur.getNextHopInfo();
}
if (cur.getNextHop() != null)
buf.append('[').append(getName(cur.getNextHop())).append(']');
buf.append(") expiring on [").append(getTime(new Date(tunnel.getSettings().getExpiration()))).append("]");
buf.append("joining tunnel [").append(tunnel.getReceiveTunnelId(0).getTunnelId()).append("] as [").append(state).append("] ");
addEntry(buf.toString());
}
@ -218,19 +208,7 @@ public class MessageHistory {
if (tunnel == null) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("tunnel ").append(tunnel.getTunnelId().getTunnelId()).append(" tested ok after ").append(timeToTest).append("ms (containing ");
TunnelInfo cur = tunnel;
while (cur != null) {
buf.append('[').append(getName(cur.getThisHop())).append("], ");
if (cur.getNextHopInfo() != null) {
cur = cur.getNextHopInfo();
} else {
if (cur.getNextHop() != null)
buf.append('[').append(getName(cur.getNextHop())).append(']');
cur = null;
}
}
buf.append(')');
buf.append("tunnel ").append(tunnel).append(" tested ok after ").append(timeToTest).append("ms");
addEntry(buf.toString());
}
@ -278,7 +256,7 @@ public class MessageHistory {
buf.append(getPrefix());
buf.append("dropped message ").append(msgId).append(" for unknown tunnel [").append(id.getTunnelId());
buf.append("] from [").append(getName(from)).append("]").append(" expiring on ");
buf.append(getTime(expiration));
buf.append(getTime(expiration.getTime()));
addEntry(buf.toString());
}
@ -308,7 +286,7 @@ public class MessageHistory {
buf.append("timed out waiting for a reply to [").append(sentMessage.getMessageType());
buf.append("] [").append(sentMessage.getMessageId()).append("] expiring on [");
if (sentMessage != null)
buf.append(getTime(new Date(sentMessage.getReplySelector().getExpiration())));
buf.append(getTime(sentMessage.getReplySelector().getExpiration()));
buf.append("] ").append(sentMessage.getReplySelector().toString());
addEntry(buf.toString());
}
@ -338,8 +316,9 @@ public class MessageHistory {
* @param peer router that the message was sent to
* @param sentOk whether the message was sent successfully
*/
public void sendMessage(String messageType, long messageId, Date expiration, Hash peer, boolean sentOk) {
public void sendMessage(String messageType, long messageId, long expiration, Hash peer, boolean sentOk) {
if (!_doLog) return;
if (true) return;
StringBuffer buf = new StringBuffer(256);
buf.append(getPrefix());
buf.append("send [").append(messageType).append("] message [").append(messageId).append("] ");
@ -363,8 +342,9 @@ public class MessageHistory {
* @param isValid whether the message is valid (non duplicates, etc)
*
*/
public void receiveMessage(String messageType, long messageId, Date expiration, Hash from, boolean isValid) {
public void receiveMessage(String messageType, long messageId, long expiration, Hash from, boolean isValid) {
if (!_doLog) return;
if (true) return;
StringBuffer buf = new StringBuffer(256);
buf.append(getPrefix());
buf.append("receive [").append(messageType).append("] with id [").append(messageId).append("] ");
@ -376,7 +356,7 @@ public class MessageHistory {
//_log.warn("ReceiveMessage tunnel message ["+messageId+"]", new Exception("Receive tunnel"));
}
}
public void receiveMessage(String messageType, long messageId, Date expiration, boolean isValid) {
public void receiveMessage(String messageType, long messageId, long expiration, boolean isValid) {
receiveMessage(messageType, messageId, expiration, null, isValid);
}
@ -424,6 +404,55 @@ public class MessageHistory {
addEntry(buf.toString());
}
public void receiveTunnelFragment(long messageId, int fragmentId) {
if (!_doLog) return;
if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
StringBuffer buf = new StringBuffer(48);
buf.append(getPrefix());
buf.append("Receive fragment ").append(fragmentId).append(" in ").append(messageId);
addEntry(buf.toString());
}
public void receiveTunnelFragmentComplete(long messageId) {
if (!_doLog) return;
if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
StringBuffer buf = new StringBuffer(48);
buf.append(getPrefix());
buf.append("Receive fragmented message completely: ").append(messageId);
addEntry(buf.toString());
}
public void droppedFragmentedMessage(long messageId) {
if (!_doLog) return;
if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
StringBuffer buf = new StringBuffer(48);
buf.append(getPrefix());
buf.append("Fragmented message dropped: ").append(messageId);
addEntry(buf.toString());
}
public void fragmentMessage(long messageId, int numFragments) {
if (!_doLog) return;
if (messageId == -1) throw new IllegalArgumentException("why are you -1?");
StringBuffer buf = new StringBuffer(48);
buf.append(getPrefix());
buf.append("Break message ").append(messageId).append(" into fragments: ").append(numFragments);
addEntry(buf.toString());
}
public void droppedTunnelDataMessageUnknown(long msgId, long tunnelId) {
if (!_doLog) return;
if (msgId == -1) throw new IllegalArgumentException("why are you -1?");
StringBuffer buf = new StringBuffer(48);
buf.append(getPrefix());
buf.append("Dropped data message ").append(msgId).append(" for unknown tunnel ").append(tunnelId);
addEntry(buf.toString());
}
public void droppedTunnelGatewayMessageUnknown(long msgId, long tunnelId) {
if (!_doLog) return;
if (msgId == -1) throw new IllegalArgumentException("why are you -1?");
StringBuffer buf = new StringBuffer(48);
buf.append(getPrefix());
buf.append("Dropped gateway message ").append(msgId).append(" for unknown tunnel ").append(tunnelId);
addEntry(buf.toString());
}
/**
* Prettify the hash by doing a base64 and returning the first 6 characters
*
@ -437,14 +466,14 @@ public class MessageHistory {
private final String getPrefix() {
StringBuffer buf = new StringBuffer(48);
buf.append(getTime(new Date(_context.clock().now())));
buf.append(getTime(_context.clock().now()));
buf.append(' ').append(_localIdent).append(": ");
return buf.toString();
}
private final String getTime(Date when) {
private final String getTime(long when) {
synchronized (_fmt) {
return _fmt.format(when);
return _fmt.format(new Date(when));
}
}

View File

@ -5,6 +5,7 @@ import java.util.Iterator;
import java.util.Set;
import java.util.TreeMap;
import net.i2p.util.DecayingBloomFilter;
import net.i2p.util.Log;
/**
@ -18,25 +19,15 @@ import net.i2p.util.Log;
public class MessageValidator {
private Log _log;
private RouterContext _context;
/**
* Expiration date (as a Long) to message id (as a Long).
* The expiration date (key) must be unique, so on collision, increment the value.
* This keeps messageIds around longer than they need to be, but hopefully not by much ;)
*
*/
private TreeMap _receivedIdExpirations;
/** Message id (as a Long) */
private Set _receivedIds;
/** synchronize on this before adjusting the received id data */
private Object _receivedIdLock;
private DecayingBloomFilter _filter;
public MessageValidator(RouterContext context) {
_log = context.logManager().getLog(MessageValidator.class);
_receivedIdExpirations = new TreeMap();
_receivedIds = new HashSet(256);
_receivedIdLock = new Object();
_filter = null;
_context = context;
context.statManager().createRateStat("router.duplicateMessageId", "Note that a duplicate messageId was received", "Router",
new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
}
@ -51,12 +42,17 @@ public class MessageValidator {
if (_log.shouldLog(Log.WARN))
_log.warn("Rejecting message " + messageId + " because it expired " + (now-expiration) + "ms ago");
return false;
} else if (now + 4*Router.CLOCK_FUDGE_FACTOR < expiration) {
if (_log.shouldLog(Log.WARN))
_log.warn("Rejecting message " + messageId + " because it will expire too far in the future (" + (expiration-now) + "ms)");
return false;
}
boolean isDuplicate = noteReception(messageId, expiration);
if (isDuplicate) {
if (_log.shouldLog(Log.WARN))
_log.warn("Rejecting message " + messageId + " because it is a duplicate", new Exception("Duplicate origin"));
_context.statManager().addRateData("router.duplicateMessageId", 1, 0);
return false;
} else {
if (_log.shouldLog(Log.DEBUG))
@ -74,75 +70,15 @@ public class MessageValidator {
* @return true if we HAVE already seen this message, false if not
*/
private boolean noteReception(long messageId, long messageExpiration) {
Long id = new Long(messageId);
synchronized (_receivedIdLock) {
locked_cleanReceivedIds(_context.clock().now() - Router.CLOCK_FUDGE_FACTOR);
if (_receivedIds.contains(id)) {
return true;
} else {
long date = messageExpiration;
while (_receivedIdExpirations.containsKey(new Long(date)))
date++;
_receivedIdExpirations.put(new Long(date), id);
_receivedIds.add(id);
return false;
}
}
boolean dup = _filter.add(messageId);
return dup;
}
/**
* Clean the ids that we no longer need to keep track of to prevent replay
* attacks.
*
*/
private void cleanReceivedIds() {
long now = _context.clock().now() - Router.CLOCK_FUDGE_FACTOR ;
synchronized (_receivedIdLock) {
locked_cleanReceivedIds(now);
}
}
/**
* Clean the ids that we no longer need to keep track of to prevent replay
* attacks - only call this from within a block synchronized on the received ID lock.
*
*/
private void locked_cleanReceivedIds(long now) {
Set toRemoveIds = null;
Set toRemoveDates = null;
for (Iterator iter = _receivedIdExpirations.keySet().iterator(); iter.hasNext(); ) {
Long date = (Long)iter.next();
if (date.longValue() <= now) {
// no need to keep track of things in the past
if (toRemoveIds == null) {
toRemoveIds = new HashSet(2);
toRemoveDates = new HashSet(2);
}
toRemoveDates.add(date);
toRemoveIds.add(_receivedIdExpirations.get(date));
} else {
// the expiration is in the future, we still need to keep track of
// it to prevent replays
break;
}
}
if (toRemoveIds != null) {
for (Iterator iter = toRemoveDates.iterator(); iter.hasNext(); )
_receivedIdExpirations.remove(iter.next());
for (Iterator iter = toRemoveIds.iterator(); iter.hasNext(); )
_receivedIds.remove(iter.next());
if (_log.shouldLog(Log.INFO))
_log.info("Cleaned out " + toRemoveDates.size()
+ " expired messageIds, leaving "
+ _receivedIds.size() + " remaining");
}
public void startup() {
_filter = new DecayingBloomFilter(_context, (int)Router.CLOCK_FUDGE_FACTOR * 2, 8);
}
void shutdown() {
if (_log.shouldLog(Log.WARN)) {
StringBuffer buf = new StringBuffer(1024);
buf.append("Validated messages: ").append(_receivedIds.size());
_log.log(Log.WARN, buf.toString());
}
_filter.stopDecaying();
}
}

View File

@ -11,10 +11,11 @@ package net.i2p.router;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -72,17 +73,15 @@ public class OutNetMessage {
setOnReplyJob(null);
setOnFailedReplyJob(null);
setReplySelector(null);
_timestamps = new HashMap(8);
_timestampOrder = new LinkedList();
_failedTransports = new HashSet();
_failedTransports = null;
_sendBegin = 0;
_createdBy = new Exception("Created by");
//_createdBy = new Exception("Created by");
_created = context.clock().now();
timestamp("Created");
_context.messageStateMonitor().outboundMessageAdded();
_context.statManager().createRateStat("outNetMessage.timeToDiscard",
"How long until we discard an outbound msg?",
"OutNetMessage", new long[] { 5*60*1000, 30*60*1000, 60*60*1000 });
//_context.messageStateMonitor().outboundMessageAdded();
//_context.statManager().createRateStat("outNetMessage.timeToDiscard",
// "How long until we discard an outbound msg?",
// "OutNetMessage", new long[] { 5*60*1000, 30*60*1000, 60*60*1000 });
}
/**
@ -92,24 +91,43 @@ public class OutNetMessage {
* @return how long this message has been 'in flight'
*/
public long timestamp(String eventName) {
synchronized (_timestamps) {
long now = _context.clock().now();
while (_timestamps.containsKey(eventName)) {
eventName = eventName + '.';
long now = _context.clock().now();
if (_log.shouldLog(Log.DEBUG)) {
// only timestamp if we are debugging
synchronized (this) {
locked_initTimestamps();
while (_timestamps.containsKey(eventName)) {
eventName = eventName + '.';
}
_timestamps.put(eventName, new Long(now));
_timestampOrder.add(eventName);
}
_timestamps.put(eventName, new Long(now));
_timestampOrder.add(eventName);
return now - _created;
}
return now - _created;
}
public Map getTimestamps() {
synchronized (_timestamps) {
return (Map)_timestamps.clone();
if (_log.shouldLog(Log.DEBUG)) {
synchronized (this) {
locked_initTimestamps();
return (Map)_timestamps.clone();
}
}
return Collections.EMPTY_MAP;
}
public Long getTimestamp(String eventName) {
synchronized (_timestamps) {
return (Long)_timestamps.get(eventName);
if (_log.shouldLog(Log.DEBUG)) {
synchronized (this) {
locked_initTimestamps();
return (Long)_timestamps.get(eventName);
}
}
return ZERO;
}
private static final Long ZERO = new Long(0);
private void locked_initTimestamps() {
if (_timestamps == null) {
_timestamps = new HashMap(8);
_timestampOrder = new ArrayList(8);
}
}
@ -204,8 +222,15 @@ public class OutNetMessage {
public MessageSelector getReplySelector() { return _replySelector; }
public void setReplySelector(MessageSelector selector) { _replySelector = selector; }
public void transportFailed(String transportStyle) { _failedTransports.add(transportStyle); }
public Set getFailedTransports() { return new HashSet(_failedTransports); }
public void transportFailed(String transportStyle) {
if (_failedTransports == null)
_failedTransports = new HashSet(1);
_failedTransports.add(transportStyle);
}
/** not thread safe - dont fail transports and iterate over this at the same time */
public Set getFailedTransports() {
return (_failedTransports == null ? Collections.EMPTY_SET : _failedTransports);
}
/** when did the sending process begin */
public long getSendBegin() { return _sendBegin; }
@ -224,10 +249,11 @@ public class OutNetMessage {
_log.debug("Discard " + _messageSize + "byte " + _messageType + " message after "
+ timeToDiscard);
_message = null;
_context.statManager().addRateData("outNetMessage.timeToDiscard", timeToDiscard, timeToDiscard);
_context.messageStateMonitor().outboundMessageDiscarded();
//_context.statManager().addRateData("outNetMessage.timeToDiscard", timeToDiscard, timeToDiscard);
//_context.messageStateMonitor().outboundMessageDiscarded();
}
/*
public void finalize() throws Throwable {
if (_message != null) {
if (_log.shouldLog(Log.WARN)) {
@ -245,7 +271,7 @@ public class OutNetMessage {
_context.messageStateMonitor().outboundMessageFinalized();
super.finalize();
}
*/
public String toString() {
StringBuffer buf = new StringBuffer(128);
buf.append("[OutNetMessage contains ");
@ -256,7 +282,8 @@ public class OutNetMessage {
buf.append(_message.getClass().getName());
}
buf.append(" expiring on ").append(new Date(_expiration));
buf.append(" failed delivery on transports ").append(_failedTransports);
if (_failedTransports != null)
buf.append(" failed delivery on transports ").append(_failedTransports);
if (_target == null)
buf.append(" targetting no one in particular...");
else
@ -277,25 +304,27 @@ public class OutNetMessage {
}
private void renderTimestamps(StringBuffer buf) {
synchronized (_timestamps) {
long lastWhen = -1;
for (int i = 0; i < _timestampOrder.size(); i++) {
String name = (String)_timestampOrder.get(i);
Long when = (Long)_timestamps.get(name);
buf.append("\t[");
long diff = when.longValue() - lastWhen;
if ( (lastWhen > 0) && (diff > 500) )
buf.append("**");
if (lastWhen > 0)
buf.append(diff);
else
buf.append(0);
buf.append("ms: \t").append(name);
buf.append('=').append(formatDate(when.longValue()));
buf.append("]\n");
lastWhen = when.longValue();
if (_log.shouldLog(Log.DEBUG)) {
synchronized (this) {
long lastWhen = -1;
for (int i = 0; i < _timestampOrder.size(); i++) {
String name = (String)_timestampOrder.get(i);
Long when = (Long)_timestamps.get(name);
buf.append("\t[");
long diff = when.longValue() - lastWhen;
if ( (lastWhen > 0) && (diff > 500) )
buf.append("**");
if (lastWhen > 0)
buf.append(diff);
else
buf.append(0);
buf.append("ms: \t").append(name);
buf.append('=').append(formatDate(when.longValue()));
buf.append("]\n");
lastWhen = when.longValue();
}
}
}
}
}
private final static SimpleDateFormat _fmt = new SimpleDateFormat("HH:mm:ss.SSS");

View File

@ -49,10 +49,10 @@ public interface ProfileManager {
*
* @param peer who rejected us
* @param responseTimeMs how long it took to get the rejection
* @param explicit true if the tunnel request was explicitly rejected, false
* if we just didn't get a reply back in time.
* @param severity how much the peer doesnt want to participate in the
* tunnel (large == more severe)
*/
void tunnelRejected(Hash peer, long responseTimeMs, boolean explicit);
void tunnelRejected(Hash peer, long responseTimeMs, int severity);
/**
* Note that a tunnel that the router is participating in

View File

@ -32,9 +32,9 @@ import net.i2p.data.DataHelper;
import net.i2p.data.RouterInfo;
import net.i2p.data.SigningPrivateKey;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.data.i2np.TunnelMessage;
//import net.i2p.data.i2np.TunnelMessage;
import net.i2p.router.message.GarlicMessageHandler;
import net.i2p.router.message.TunnelMessageHandler;
//import net.i2p.router.message.TunnelMessageHandler;
import net.i2p.router.startup.StartupJob;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
@ -67,6 +67,9 @@ public class Router {
/** let clocks be off by 1 minute */
public final static long CLOCK_FUDGE_FACTOR = 1*60*1000;
/** used to differentiate routerInfo files on different networks */
public static final int NETWORK_ID = 1;
public final static String PROP_INFO_FILENAME = "router.info.location";
public final static String PROP_INFO_FILENAME_DEFAULT = "router.info";
@ -153,11 +156,33 @@ public class Router {
public String getConfigFilename() { return _configFilename; }
public void setConfigFilename(String filename) { _configFilename = filename; }
public String getConfigSetting(String name) { return _config.getProperty(name); }
public void setConfigSetting(String name, String value) { _config.setProperty(name, value); }
public void removeConfigSetting(String name) { _config.remove(name); }
public Set getConfigSettings() { return new HashSet(_config.keySet()); }
public Properties getConfigMap() { return _config; }
public String getConfigSetting(String name) {
synchronized (_config) {
return _config.getProperty(name);
}
}
public void setConfigSetting(String name, String value) {
synchronized (_config) {
_config.setProperty(name, value);
}
}
public void removeConfigSetting(String name) {
synchronized (_config) {
_config.remove(name);
}
}
public Set getConfigSettings() {
synchronized (_config) {
return new HashSet(_config.keySet());
}
}
public Properties getConfigMap() {
Properties rv = new Properties();
synchronized (_config) {
rv.putAll(_config);
}
return rv;
}
public RouterInfo getRouterInfo() { return _routerInfo; }
public void setRouterInfo(RouterInfo info) {
@ -191,6 +216,9 @@ public class Router {
readConfig();
setupHandlers();
_context.messageValidator().startup();
_context.tunnelDispatcher().startup();
_context.inNetMessagePool().startup();
startupQueue();
_context.jobQueue().addJob(new CoalesceStatsJob());
_context.jobQueue().addJob(new UpdateRoutingKeyModifierJob());
@ -234,7 +262,7 @@ public class Router {
}
public boolean isAlive() { return _isAlive; }
/**
* Rebuild and republish our routerInfo since something significant
* has changed.
@ -252,6 +280,7 @@ public class Router {
try {
ri.setPublished(_context.clock().now());
Properties stats = _context.statPublisher().publishStatistics();
stats.setProperty(RouterInfo.PROP_NETWORK_ID, NETWORK_ID+"");
ri.setOptions(stats);
ri.setAddresses(_context.commSystem().createAddresses());
SigningPrivateKey key = _context.keyManager().getSigningPrivateKey();
@ -302,7 +331,7 @@ public class Router {
}
System.out.println("INFO: Restarting the router after removing any old identity files");
// hard and ugly
System.exit(EXIT_GRACEFUL_RESTART);
System.exit(EXIT_HARD_RESTART);
}
/**
@ -399,7 +428,7 @@ public class Router {
private void setupHandlers() {
_context.inNetMessagePool().registerHandlerJobBuilder(GarlicMessage.MESSAGE_TYPE, new GarlicMessageHandler(_context));
_context.inNetMessagePool().registerHandlerJobBuilder(TunnelMessage.MESSAGE_TYPE, new TunnelMessageHandler(_context));
//_context.inNetMessagePool().registerHandlerJobBuilder(TunnelMessage.MESSAGE_TYPE, new TunnelMessageHandler(_context));
}
public void renderStatusHTML(Writer out) throws IOException {
@ -687,11 +716,13 @@ public class Router {
try { _context.statPublisher().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the stats manager", t); }
try { _context.clientManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the client manager", t); }
try { _context.tunnelManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the tunnel manager", t); }
try { _context.tunnelDispatcher().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the tunnel dispatcher", t); }
try { _context.netDb().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the networkDb", t); }
try { _context.commSystem().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the comm system", t); }
try { _context.peerManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the peer manager", t); }
try { _context.messageRegistry().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the message registry", t); }
try { _context.messageValidator().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the message validator", t); }
try { _context.inNetMessagePool().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the inbound net pool", t); }
try { _sessionKeyPersistenceHelper.shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the session key manager", t); }
RouterContext.listContexts().remove(_context);
dumpStats();

View File

@ -22,7 +22,8 @@ import net.i2p.router.transport.CommSystemFacadeImpl;
import net.i2p.router.transport.FIFOBandwidthLimiter;
import net.i2p.router.transport.OutboundMessageRegistry;
import net.i2p.router.transport.VMCommSystem;
import net.i2p.router.tunnelmanager.PoolingTunnelManagerFacade;
import net.i2p.router.tunnel.pool.TunnelPoolManager;
import net.i2p.router.tunnel.TunnelDispatcher;
/**
* Build off the core I2P context to provide a root for a router instance to
@ -50,6 +51,7 @@ public class RouterContext extends I2PAppContext {
private ProfileManager _profileManager;
private FIFOBandwidthLimiter _bandwidthLimiter;
private TunnelManagerFacade _tunnelManager;
private TunnelDispatcher _tunnelDispatcher;
private StatisticsManager _statPublisher;
private Shitlist _shitlist;
private MessageValidator _messageValidator;
@ -103,7 +105,8 @@ public class RouterContext extends I2PAppContext {
_peerManagerFacade = new PeerManagerFacadeImpl(this);
_profileManager = new ProfileManagerImpl(this);
_bandwidthLimiter = new FIFOBandwidthLimiter(this);
_tunnelManager = new PoolingTunnelManagerFacade(this);
_tunnelManager = new TunnelPoolManager(this);
_tunnelDispatcher = new TunnelDispatcher(this);
_statPublisher = new StatisticsManager(this);
_shitlist = new Shitlist(this);
_messageValidator = new MessageValidator(this);
@ -215,6 +218,10 @@ public class RouterContext extends I2PAppContext {
* Any configuration for the tunnels is rooted from the context's properties
*/
public TunnelManagerFacade tunnelManager() { return _tunnelManager; }
/**
* Handle tunnel messages, as well as coordinate the gateways
*/
public TunnelDispatcher tunnelDispatcher() { return _tunnelDispatcher; }
/**
* If the router is configured to, gather up some particularly tasty morsels
* regarding the stats managed and offer to publish them into the routerInfo.

View File

@ -22,9 +22,10 @@ public interface RouterThrottle {
/**
* Should we accept the request to participate in the given tunnel,
* taking into account our current load and bandwidth usage commitments?
*
*
* @return 0 if it should be accepted, higher values for more severe rejection
*/
public boolean acceptTunnelRequest(TunnelCreateMessage msg);
public int acceptTunnelRequest(TunnelCreateMessage msg);
/**
* Should we accept the netDb lookup message, replying either with the
* value or some closer peers, or should we simply drop it due to overload?

View File

@ -2,6 +2,7 @@ package net.i2p.router;
import net.i2p.data.Hash;
import net.i2p.data.i2np.TunnelCreateMessage;
import net.i2p.router.peermanager.TunnelHistory;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;
@ -33,6 +34,9 @@ class RouterThrottleImpl implements RouterThrottle {
private static final String PROP_DEFAULT_KBPS_THROTTLE = "router.defaultKBpsThrottle";
private static final String PROP_BANDWIDTH_SHARE_PERCENTAGE = "router.sharePercentage";
/** tunnel acceptance */
public static final int TUNNEL_ACCEPT = 0;
public RouterThrottleImpl(RouterContext context) {
_context = context;
_log = context.logManager().getLog(RouterThrottleImpl.class);
@ -71,7 +75,8 @@ class RouterThrottleImpl implements RouterThrottle {
return true;
}
}
public boolean acceptTunnelRequest(TunnelCreateMessage msg) {
public int acceptTunnelRequest(TunnelCreateMessage msg) {
long lag = _context.jobQueue().getMaxLag();
RateStat rs = _context.statManager().getRate("router.throttleNetworkCause");
Rate r = null;
@ -84,7 +89,7 @@ class RouterThrottleImpl implements RouterThrottle {
+ " since there have been " + throttleEvents
+ " throttle events in the last 15 minutes or so");
_context.statManager().addRateData("router.throttleTunnelCause", lag, lag);
return false;
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
}
rs = _context.statManager().getRate("transport.sendProcessingTime");
@ -97,7 +102,7 @@ class RouterThrottleImpl implements RouterThrottle {
_log.debug("Refusing tunnel request with the job lag of " + lag
+ "since the 10 minute message processing time is too slow (" + processTime + ")");
_context.statManager().addRateData("router.throttleTunnelProcessingTime10m", (long)processTime, (long)processTime);
return false;
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
}
if (rs != null)
r = rs.getRate(60*1000);
@ -107,7 +112,7 @@ class RouterThrottleImpl implements RouterThrottle {
_log.debug("Refusing tunnel request with the job lag of " + lag
+ "since the 1 minute message processing time is too slow (" + processTime + ")");
_context.statManager().addRateData("router.throttleTunnelProcessingTime1m", (long)processTime, (long)processTime);
return false;
return TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
}
int numTunnels = _context.tunnelManager().getParticipatingCount();
@ -115,7 +120,7 @@ class RouterThrottleImpl implements RouterThrottle {
if (_context.getProperty(Router.PROP_SHUTDOWN_IN_PROGRESS) != null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Refusing tunnel request since we are shutting down ASAP");
return false;
return TunnelHistory.TUNNEL_REJECT_CRIT;
}
if (numTunnels > getMinThrottleTunnels()) {
@ -127,6 +132,9 @@ class RouterThrottleImpl implements RouterThrottle {
avg = avgTunnels.getAverageValue();
else
avg = avgTunnels.getLifetimeAverageValue();
int min = getMinThrottleTunnels();
if (avg < min)
avg = min;
if ( (avg > 0) && (avg*growthFactor < numTunnels) ) {
// we're accelerating, lets try not to take on too much too fast
double probAccept = (avg*growthFactor) / numTunnels;
@ -141,7 +149,7 @@ class RouterThrottleImpl implements RouterThrottle {
_log.warn("Probabalistically refusing tunnel request (avg=" + avg
+ " current=" + numTunnels + ")");
_context.statManager().addRateData("router.throttleTunnelProbTooFast", (long)(numTunnels-avg), 0);
return false;
return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
}
} else {
if (_log.shouldLog(Log.INFO))
@ -160,6 +168,9 @@ class RouterThrottleImpl implements RouterThrottle {
else
avg60m = tunnelTestTime60m.getLifetimeAverageValue();
if (avg60m < 2000)
avg60m = 2000; // minimum before complaining
if ( (avg60m > 0) && (avg10m > avg60m * growthFactor) ) {
double probAccept = (avg60m*growthFactor)/avg10m;
int v = _context.random().nextInt(100);
@ -173,7 +184,7 @@ class RouterThrottleImpl implements RouterThrottle {
_log.warn("Probabalistically refusing tunnel request (test time avg 10m=" + avg10m
+ " 60m=" + avg60m + ")");
_context.statManager().addRateData("router.throttleTunnelProbTestSlow", (long)(avg10m-avg60m), 0);
return false;
return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
}
}
}
@ -188,7 +199,7 @@ class RouterThrottleImpl implements RouterThrottle {
_log.warn("Refusing tunnel request since we are already participating in "
+ numTunnels + " (our max is " + max + ")");
_context.statManager().addRateData("router.throttleTunnelMaxExceeded", numTunnels, 0);
return false;
return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
}
} catch (NumberFormatException nfe) {
// no default, ignore it
@ -197,23 +208,23 @@ class RouterThrottleImpl implements RouterThrottle {
// ok, we're not hosed, but can we handle the bandwidth requirements
// of another tunnel?
rs = _context.statManager().getRate("tunnel.participatingBytesProcessed");
rs = _context.statManager().getRate("tunnel.participatingMessageCount");
r = null;
if (rs != null)
r = rs.getRate(10*60*1000);
double bytesAllocated = r.getCurrentTotalValue();
double bytesAllocated = (r != null ? r.getCurrentTotalValue() * 1024 : 0);
if (!allowTunnel(bytesAllocated, numTunnels)) {
_context.statManager().addRateData("router.throttleTunnelBandwidthExceeded", (long)bytesAllocated, 0);
return false;
return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
}
_context.statManager().addRateData("tunnel.bytesAllocatedAtAccept", (long)bytesAllocated, msg.getTunnelDurationSeconds()*1000);
_context.statManager().addRateData("tunnel.bytesAllocatedAtAccept", (long)bytesAllocated, msg.getDurationSeconds()*1000);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Accepting a new tunnel request (now allocating " + bytesAllocated + " bytes across " + numTunnels
+ " tunnels with lag of " + lag + " and " + throttleEvents + " throttle events)");
return true;
return TUNNEL_ACCEPT;
}
/**
@ -320,9 +331,9 @@ class RouterThrottleImpl implements RouterThrottle {
private double getTunnelGrowthFactor() {
try {
return Double.parseDouble(_context.getProperty("router.tunnelGrowthFactor", "1.5"));
return Double.parseDouble(_context.getProperty("router.tunnelGrowthFactor", "3.0"));
} catch (NumberFormatException nfe) {
return 1.5;
return 3.0;
}
}

View File

@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.137 $ $Date: 2005/01/23 03:22:11 $";
public final static String VERSION = "0.4.2.6";
public final static long BUILD = 7;
public final static String ID = "$Revision: 1.137.2.12 $ $Date: 2005/02/16 13:59:59 $";
public final static String VERSION = "0.5-pre";
public final static long BUILD = 12;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION);
System.out.println("Router ID: " + RouterVersion.ID);

View File

@ -82,7 +82,7 @@ public class Shitlist {
}
//_context.netDb().fail(peer);
_context.tunnelManager().peerFailed(peer);
//_context.tunnelManager().peerFailed(peer);
_context.messageRegistry().peerFailed(peer);
return wasAlready;
}

View File

@ -102,56 +102,33 @@ public class StatisticsManager implements Service {
stats.putAll(_context.profileManager().summarizePeers(_publishedStats));
includeThroughput(stats);
includeRate("router.duplicateMessageId", stats, new long[] { 24*60*60*1000 });
includeRate("tunnel.duplicateIV", stats, new long[] { 24*60*60*1000 });
includeRate("tunnel.fragmentedComplete", stats, new long[] { 10*60*1000, 3*60*60*1000 });
includeRate("tunnel.fragmentedDropped", stats, new long[] { 10*60*1000, 3*60*60*1000 });
includeRate("tunnel.fullFragments", stats, new long[] { 10*60*1000, 3*60*60*1000 });
includeRate("tunnel.smallFragments", stats, new long[] { 10*60*1000, 3*60*60*1000 });
includeRate("tunnel.testFailedTime", stats, new long[] { 60*60*1000, 3*60*60*1000 });
includeRate("tunnel.dispatchOutboundTime", stats, new long[] { 60*60*1000 });
includeRate("tunnel.dispatchGatewayTime", stats, new long[] { 60*60*1000 });
includeRate("tunnel.dispatchDataTime", stats, new long[] { 60*60*1000 });
includeRate("tunnel.buildFailure", stats, new long[] { 10*60*1000, 60*60*1000 });
includeRate("tunnel.buildSuccess", stats, new long[] { 10*60*1000, 60*60*1000 });
includeRate("router.throttleTunnelProbTestSlow", stats, new long[] { 60*60*1000 });
includeRate("router.throttleTunnelProbTooFast", stats, new long[] { 60*60*1000 });
includeRate("router.throttleTunnelProcessingTime1m", stats, new long[] { 60*60*1000 });
includeRate("clock.skew", stats, new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*1000 });
includeRate("transport.sendProcessingTime", stats, new long[] { 60*60*1000 });
includeRate("tcp.probabalisticDropQueueSize", stats, new long[] { 60*1000l, 60*60*1000l });
//includeRate("tcp.queueSize", stats);
//includeRate("jobQueue.jobLag", stats, new long[] { 60*1000, 60*60*1000 });
//includeRate("jobQueue.jobRun", stats, new long[] { 60*1000, 60*60*1000 });
includeRate("jobQueue.jobRunSlow", stats, new long[] { 10*60*1000l, 60*60*1000l });
includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*60*1000 });
//includeRate("crypto.garlic.decryptFail", stats, new long[] { 60*60*1000, 24*60*60*1000 });
includeRate("tunnel.unknownTunnelTimeLeft", stats, new long[] { 60*60*1000 });
//includeRate("jobQueue.readyJobs", stats, new long[] { 60*60*1000 });
//includeRate("jobQueue.droppedJobs", stats, new long[] { 60*60*1000, 24*60*60*1000 });
//includeRate("inNetPool.dropped", stats, new long[] { 60*60*1000, 24*60*60*1000 });
includeRate("tunnel.participatingTunnels", stats, new long[] { 5*60*1000, 60*60*1000 });
includeRate("tunnel.participatingBytesProcessed", stats, new long[] { 10*60*1000 });
includeRate("tunnel.participatingBytesProcessedActive", stats, new long[] { 10*60*1000 });
includeRate("tunnel.testSuccessTime", stats, new long[] { 60*60*1000l, 24*60*60*1000l });
//includeRate("tunnel.outboundMessagesProcessed", stats, new long[] { 10*60*1000, 60*60*1000 });
//includeRate("tunnel.inboundMessagesProcessed", stats, new long[] { 10*60*1000, 60*60*1000 });
//includeRate("tunnel.participatingMessagesProcessed", stats, new long[] { 10*60*1000, 60*60*1000 });
//includeRate("tunnel.participatingMessagesProcessedActive", stats, new long[] { 10*60*1000, 60*60*1000 });
//includeRate("tunnel.expiredAfterAcceptTime", stats, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
includeRate("tunnel.bytesAllocatedAtAccept", stats, new long[] { 60*60*1000l });
includeRate("netDb.lookupsReceived", stats, new long[] { 60*60*1000 });
//includeRate("netDb.lookupsHandled", stats, new long[] { 60*60*1000 });
includeRate("netDb.lookupsMatched", stats, new long[] { 60*60*1000 });
//includeRate("netDb.storeSent", stats, new long[] { 5*60*1000, 60*60*1000 });
includeRate("netDb.successPeers", stats, new long[] { 60*60*1000 });
//includeRate("netDb.failedPeers", stats, new long[] { 60*60*1000 });
//includeRate("router.throttleNetDbDoSSend", stats, new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
//includeRate("router.throttleNetDbDoS", stats, new long[] { 10*60*1000, 60*60*1000 });
//includeRate("netDb.searchCount", stats, new long[] { 3*60*60*1000});
//includeRate("netDb.searchMessageCount", stats, new long[] { 5*60*1000, 10*60*1000, 60*60*1000 });
//includeRate("inNetMessage.timeToDiscard", stats, new long[] { 5*60*1000, 10*60*1000, 60*60*1000 });
//includeRate("outNetMessage.timeToDiscard", stats, new long[] { 5*60*1000, 10*60*1000, 60*60*1000 });
//includeRate("router.throttleNetworkCause", stats, new long[] { 10*60*1000, 60*60*1000 });
//includeRate("transport.receiveMessageSize", stats, new long[] { 5*60*1000, 60*60*1000 });
//includeRate("transport.sendMessageSize", stats, new long[] { 5*60*1000, 60*60*1000 });
//includeRate("transport.sendMessageSmall", stats, new long[] { 5*60*1000, 60*60*1000 });
//includeRate("transport.sendMessageMedium", stats, new long[] { 5*60*1000, 60*60*1000 });
//includeRate("transport.sendMessageLarge", stats, new long[] { 5*60*1000, 60*60*1000 });
//includeRate("transport.receiveMessageSmall", stats, new long[] { 5*60*1000, 60*60*1000 });
//includeRate("transport.receiveMessageMedium", stats, new long[] { 5*60*1000, 60*60*1000 });
//includeRate("transport.receiveMessageLarge", stats, new long[] { 5*60*1000, 60*60*1000 });
includeRate("client.sendAckTime", stats, new long[] { 60*60*1000 }, true);
includeRate("stream.con.sendDuplicateSize", stats, new long[] { 60*60*1000 });
includeRate("stream.con.receiveDuplicateSize", stats, new long[] { 60*60*1000 });
//includeRate("client.sendsPerFailure", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
//includeRate("client.timeoutCongestionTunnel", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
//includeRate("client.timeoutCongestionMessage", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
//includeRate("client.timeoutCongestionInbound", stats, new long[] { 60*60*1000, 24*60*60*1000l }, true);
stats.setProperty("stat_uptime", DataHelper.formatDuration(_context.router().getUptime()));
stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");
_log.debug("Publishing peer rankings");
@ -223,6 +200,7 @@ public class StatisticsManager implements Service {
double peakFrequency = rate.getExtremeEventCount();
buf.append(num(avgFrequency)).append(';');
buf.append(num(rate.getExtremeEventCount())).append(';');
buf.append(num((double)rate.getLifetimeEventCount())).append(';');
}
}
return buf.toString();

View File

@ -8,392 +8,45 @@ package net.i2p.router;
*
*/
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Date;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
import net.i2p.I2PAppContext;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.DataStructureImpl;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import net.i2p.data.SigningPrivateKey;
import net.i2p.data.SigningPublicKey;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.TunnelConfigurationSessionKey;
import net.i2p.data.i2np.TunnelSessionKey;
import net.i2p.data.i2np.TunnelSigningPrivateKey;
import net.i2p.data.i2np.TunnelSigningPublicKey;
import net.i2p.util.Log;
/**
* Defines the information associated with a tunnel
*/
public class TunnelInfo extends DataStructureImpl {
private I2PAppContext _context;
private static Log _log;
private TunnelId _id;
private Hash _nextHop;
private TunnelId _nextHopId;
private Hash _thisHop;
private TunnelInfo _nextHopInfo;
private TunnelConfigurationSessionKey _configurationKey;
private TunnelSigningPublicKey _verificationKey;
private TunnelSigningPrivateKey _signingKey;
private TunnelSessionKey _encryptionKey;
private Destination _destination;
private Properties _options;
private TunnelSettings _settings;
private long _created;
private long _lastTested;
private boolean _ready;
private boolean _wasEverReady;
private int _messagesProcessed;
private int _tunnelFailures;
private long _bytesProcessed;
public TunnelInfo(I2PAppContext context) {
_context = context;
if (_log == null)
_log = context.logManager().getLog(TunnelInfo.class);
setTunnelId(null);
setThisHop(null);
setNextHop(null);
setNextHopId(null);
setNextHopInfo(null);
_configurationKey = null;
_verificationKey = null;
_signingKey = null;
_encryptionKey = null;
setDestination(null);
setSettings(null);
_options = new Properties();
_ready = false;
_wasEverReady = false;
_created = _context.clock().now();
_lastTested = -1;
_messagesProcessed = 0;
_tunnelFailures = 0;
_bytesProcessed = 0;
}
public TunnelId getTunnelId() { return _id; }
public void setTunnelId(TunnelId id) { _id = id; }
public Hash getNextHop() { return _nextHop; }
public void setNextHop(Hash nextHopRouterIdentity) { _nextHop = nextHopRouterIdentity; }
public TunnelId getNextHopId() { return _nextHopId; }
public void setNextHopId(TunnelId id) { _nextHopId = id; }
public Hash getThisHop() { return _thisHop; }
public void setThisHop(Hash thisHopRouterIdentity) { _thisHop = thisHopRouterIdentity; }
public TunnelInfo getNextHopInfo() { return _nextHopInfo; }
public void setNextHopInfo(TunnelInfo info) { _nextHopInfo = info; }
public TunnelConfigurationSessionKey getConfigurationKey() { return _configurationKey; }
public void setConfigurationKey(TunnelConfigurationSessionKey key) { _configurationKey = key; }
public void setConfigurationKey(SessionKey key) {
TunnelConfigurationSessionKey tk = new TunnelConfigurationSessionKey();
tk.setKey(key);
_configurationKey = tk;
}
public TunnelSigningPublicKey getVerificationKey() { return _verificationKey; }
public void setVerificationKey(TunnelSigningPublicKey key) { _verificationKey = key; }
public void setVerificationKey(SigningPublicKey key) {
TunnelSigningPublicKey tk = new TunnelSigningPublicKey();
tk.setKey(key);
_verificationKey = tk;
}
public TunnelSigningPrivateKey getSigningKey() { return _signingKey; }
public void setSigningKey(TunnelSigningPrivateKey key) { _signingKey = key; }
public void setSigningKey(SigningPrivateKey key) {
TunnelSigningPrivateKey tk = new TunnelSigningPrivateKey();
tk.setKey(key);
_signingKey = tk;
}
public TunnelSessionKey getEncryptionKey() { return _encryptionKey; }
public void setEncryptionKey(TunnelSessionKey key) { _encryptionKey = key; }
public void setEncryptionKey(SessionKey key) {
TunnelSessionKey tk = new TunnelSessionKey();
tk.setKey(key);
_encryptionKey = tk;
}
public Destination getDestination() { return _destination; }
public void setDestination(Destination dest) { _destination = dest; }
public String getProperty(String key) { return _options.getProperty(key); }
public void setProperty(String key, String val) { _options.setProperty(key, val); }
public void clearProperties() { _options.clear(); }
public Set getPropertyNames() { return new HashSet(_options.keySet()); }
public TunnelSettings getSettings() { return _settings; }
public void setSettings(TunnelSettings settings) { _settings = settings; }
public interface TunnelInfo {
/** how many peers are there in the tunnel (including the creator)? */
public int getLength();
/**
* Have all of the routers in this tunnel confirmed participation, and we're ok to
* start sending messages through this tunnel?
*/
public boolean getIsReady() { return _ready; }
public void setIsReady(boolean ready) {
_ready = ready;
if (ready)
_wasEverReady = true;
}
/**
* true if this tunnel was ever working (aka rebuildable)
* retrieve the tunnelId that the given hop receives messages on.
* the gateway is hop 0.
*
*/
public boolean getWasEverReady() { return _wasEverReady; }
public long getCreated() { return _created; }
/** when was the peer last tested (or -1 if never)? */
public long getLastTested() { return _lastTested; }
public void setLastTested(long when) { _lastTested = when; }
public TunnelId getReceiveTunnelId(int hop);
/**
* Number of hops left in the tunnel (including this one)
* retrieve the tunnelId that the given hop sends messages on.
* the gateway is hop 0.
*
*/
public final int getLength() {
int len = 0;
TunnelInfo info = this;
while (info != null) {
info = info.getNextHopInfo();
len++;
}
return len;
}
public TunnelId getSendTunnelId(int hop);
/** how many messages have passed through this tunnel in its lifetime? */
public int getMessagesProcessed() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Tunnel " + _id.getTunnelId() + " processed " + _messagesProcessed + " messages");
return _messagesProcessed;
}
/** we have just processed a message for this tunnel */
public void messageProcessed(int size) {
_messagesProcessed++;
_bytesProcessed += size;
}
/** how many bytes have been pumped through this tunnel in its lifetime? */
public long getBytesProcessed() { return _bytesProcessed; }
/** retrieve the peer at the given hop. the gateway is hop 0 */
public Hash getPeer(int hop);
/** is this an inbound tunnel? */
public boolean isInbound();
/** if this is a client tunnel, what destination is it for? */
public Hash getDestination();
public long getExpiration();
/**
* the tunnel was (potentially) unable to pass a message through.
*
* @return the new number of tunnel failures ever for this tunnel
* take note that the tunnel was able to measurably Do Good
* in the given time
*/
public int incrementFailures() { return ++_tunnelFailures; }
public void testSuccessful(int responseTime);
public void readBytes(InputStream in) throws DataFormatException, IOException {
_options = DataHelper.readProperties(in);
Boolean includeDest = DataHelper.readBoolean(in);
if (includeDest.booleanValue()) {
_destination = new Destination();
_destination.readBytes(in);
} else {
_destination = null;
}
Boolean includeThis = DataHelper.readBoolean(in);
if (includeThis.booleanValue()) {
_thisHop = new Hash();
_thisHop.readBytes(in);
} else {
_thisHop = null;
}
Boolean includeNext = DataHelper.readBoolean(in);
if (includeNext.booleanValue()) {
_nextHop = new Hash();
_nextHop.readBytes(in);
_nextHopId = new TunnelId();
_nextHopId.readBytes(in);
} else {
_nextHop = null;
}
Boolean includeNextInfo = DataHelper.readBoolean(in);
if (includeNextInfo.booleanValue()) {
_nextHopInfo = new TunnelInfo(_context);
_nextHopInfo.readBytes(in);
} else {
_nextHopInfo = null;
}
_id = new TunnelId();
_id.readBytes(in);
Boolean includeConfigKey = DataHelper.readBoolean(in);
if (includeConfigKey.booleanValue()) {
_configurationKey = new TunnelConfigurationSessionKey();
_configurationKey.readBytes(in);
} else {
_configurationKey = null;
}
Boolean includeEncryptionKey = DataHelper.readBoolean(in);
if (includeEncryptionKey.booleanValue()) {
_encryptionKey = new TunnelSessionKey();
_encryptionKey.readBytes(in);
} else {
_encryptionKey = null;
}
Boolean includeSigningKey = DataHelper.readBoolean(in);
if (includeSigningKey.booleanValue()) {
_signingKey = new TunnelSigningPrivateKey();
_signingKey.readBytes(in);
} else {
_signingKey = null;
}
Boolean includeVerificationKey = DataHelper.readBoolean(in);
if (includeVerificationKey.booleanValue()) {
_verificationKey = new TunnelSigningPublicKey();
_verificationKey.readBytes(in);
} else {
_verificationKey = null;
}
_settings = new TunnelSettings(_context);
_settings.readBytes(in);
Boolean ready = DataHelper.readBoolean(in);
if (ready != null)
setIsReady(ready.booleanValue());
}
/**
 * Serialize the tunnel state to the stream, mirroring readBytes():
 * the options, then each optional field prefixed by a boolean presence
 * flag, the mandatory tunnel id and settings, and finally the ready flag.
 *
 * @param out stream to write the serialized tunnel to
 * @throws DataFormatException if a mandatory field (_id, _options, _settings)
 *         is unset, or if _nextHop is set without a matching _nextHopId
 * @throws IOException if the underlying stream fails
 */
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
    if (_id == null) throw new DataFormatException("Invalid tunnel ID: " + _id);
    if (_options == null) throw new DataFormatException("Options are null");
    if (_settings == null) throw new DataFormatException("Settings are null");
    // readBytes() consumes _nextHop and _nextHopId together under a single
    // presence flag, so catch the inconsistency here with a declared
    // exception instead of an NPE partway through the write
    if ( (_nextHop != null) && (_nextHopId == null) )
        throw new DataFormatException("Next hop specified without a next hop tunnel ID");
    // everything else is optional in the serialization
    DataHelper.writeProperties(out, _options);
    if (_destination != null) {
        DataHelper.writeBoolean(out, Boolean.TRUE);
        _destination.writeBytes(out);
    } else {
        DataHelper.writeBoolean(out, Boolean.FALSE);
    }
    if (_thisHop != null) {
        DataHelper.writeBoolean(out, Boolean.TRUE);
        _thisHop.writeBytes(out);
    } else {
        DataHelper.writeBoolean(out, Boolean.FALSE);
    }
    if (_nextHop != null) {
        DataHelper.writeBoolean(out, Boolean.TRUE);
        _nextHop.writeBytes(out);
        _nextHopId.writeBytes(out);
    } else {
        DataHelper.writeBoolean(out, Boolean.FALSE);
    }
    if (_nextHopInfo != null) {
        DataHelper.writeBoolean(out, Boolean.TRUE);
        _nextHopInfo.writeBytes(out);
    } else {
        DataHelper.writeBoolean(out, Boolean.FALSE);
    }
    _id.writeBytes(out);
    if (_configurationKey != null) {
        DataHelper.writeBoolean(out, Boolean.TRUE);
        _configurationKey.writeBytes(out);
    } else {
        DataHelper.writeBoolean(out, Boolean.FALSE);
    }
    if (_encryptionKey != null) {
        DataHelper.writeBoolean(out, Boolean.TRUE);
        _encryptionKey.writeBytes(out);
    } else {
        DataHelper.writeBoolean(out, Boolean.FALSE);
    }
    if (_signingKey != null) {
        DataHelper.writeBoolean(out, Boolean.TRUE);
        _signingKey.writeBytes(out);
    } else {
        DataHelper.writeBoolean(out, Boolean.FALSE);
    }
    if (_verificationKey != null) {
        DataHelper.writeBoolean(out, Boolean.TRUE);
        _verificationKey.writeBytes(out);
    } else {
        DataHelper.writeBoolean(out, Boolean.FALSE);
    }
    _settings.writeBytes(out);
    // Boolean.valueOf reuses the cached instances - never new Boolean(..)
    DataHelper.writeBoolean(out, Boolean.valueOf(_ready));
}
/**
 * Render the tunnel for debugging: the tunnel id followed by one entry
 * per hop, walking the chain via getNextHopInfo().
 */
public String toString() {
    StringBuffer buf = new StringBuffer();
    buf.append("[Tunnel ").append(_id.getTunnelId());
    TunnelInfo cur = this;
    int i = 0;
    while (cur != null) {
        buf.append("\n*Hop ").append(i).append(": ").append(cur.getThisHop());
        if (cur.getDestination() != null)
            buf.append("\n Destination: ").append(cur.getDestination().calculateHash().toBase64());
        if (cur.getNextHop() != null)
            buf.append("\n Next: ").append(cur.getNextHop());
        // was duplicated as getNextHop() != null - guard on the field
        // actually being printed
        if (cur.getNextHopId() != null)
            buf.append("\n NextId: ").append(cur.getNextHopId());
        if (cur.getSettings() == null)
            buf.append("\n Expiration: ").append("none");
        else
            buf.append("\n Expiration: ").append(new Date(cur.getSettings().getExpiration()));
        cur = cur.getNextHopInfo();
        i++;
    }
    buf.append("]");
    return buf.toString();
}
/**
 * Hash over every serialized field, folded with a small prime so field
 * order matters.  Kept consistent with equals().
 */
public int hashCode() {
    // seed with the first field (7*0 + x == x), then fold the rest in
    int hash = DataHelper.hashCode(_options);
    hash = 7*hash + DataHelper.hashCode(_destination);
    hash = 7*hash + DataHelper.hashCode(_nextHop);
    hash = 7*hash + DataHelper.hashCode(_nextHopId);
    hash = 7*hash + DataHelper.hashCode(_thisHop);
    hash = 7*hash + DataHelper.hashCode(_id);
    hash = 7*hash + DataHelper.hashCode(_configurationKey);
    hash = 7*hash + DataHelper.hashCode(_encryptionKey);
    hash = 7*hash + DataHelper.hashCode(_signingKey);
    hash = 7*hash + DataHelper.hashCode(_verificationKey);
    hash = 7*hash + DataHelper.hashCode(_settings);
    hash = 7*hash + (_ready ? 0 : 1);
    return hash;
}
/**
 * Two TunnelInfo instances are equal when every serialized field
 * (keys, hops, settings, options, ready state) matches.
 */
public boolean equals(Object obj) {
    // instanceof is false for null, so this covers both guards
    if (!(obj instanceof TunnelInfo))
        return false;
    TunnelInfo info = (TunnelInfo)obj;
    if (getIsReady() != info.getIsReady())
        return false;
    return DataHelper.eq(getConfigurationKey(), info.getConfigurationKey()) &&
           DataHelper.eq(getDestination(), info.getDestination()) &&
           DataHelper.eq(getEncryptionKey(), info.getEncryptionKey()) &&
           DataHelper.eq(getNextHop(), info.getNextHop()) &&
           DataHelper.eq(getNextHopId(), info.getNextHopId()) &&
           DataHelper.eq(getNextHopInfo(), info.getNextHopInfo()) &&
           DataHelper.eq(getSettings(), info.getSettings()) &&
           DataHelper.eq(getSigningKey(), info.getSigningKey()) &&
           DataHelper.eq(getThisHop(), info.getThisHop()) &&
           DataHelper.eq(getTunnelId(), info.getTunnelId()) &&
           DataHelper.eq(getVerificationKey(), info.getVerificationKey()) &&
           DataHelper.eq(_options, info._options);
}
public long getProcessedMessagesCount();
}

View File

@ -19,42 +19,22 @@ import net.i2p.data.TunnelId;
*
*/
public interface TunnelManagerFacade extends Service {
/**
* React to a request to join the specified tunnel.
*
* @return true if the router will accept participation, else false.
*/
boolean joinTunnel(TunnelInfo info);
/**
* Retrieve the information related to a particular tunnel
*
* @param id the tunnelId as seen at the gateway
*
*/
TunnelInfo getTunnelInfo(TunnelId id);
/**
* Retrieve a set of tunnels from the existing ones for various purposes
*/
List selectOutboundTunnelIds(TunnelSelectionCriteria criteria);
/**
* Retrieve a set of tunnels from the existing ones for various purposes
*/
List selectInboundTunnelIds(TunnelSelectionCriteria criteria);
/** pick an inbound tunnel not bound to a particular destination */
TunnelInfo selectInboundTunnel();
/** pick an inbound tunnel bound to the given destination */
TunnelInfo selectInboundTunnel(Hash destination);
/** pick an outbound tunnel not bound to a particular destination */
TunnelInfo selectOutboundTunnel();
/** pick an outbound tunnel bound to the given destination */
TunnelInfo selectOutboundTunnel(Hash destination);
/**
* Make sure appropriate outbound tunnels are in place, builds requested
* inbound tunnels, then fire off a job to ask the ClientManagerFacade to
* validate the leaseSet, then publish it in the network database.
*
*/
void createTunnels(Destination destination, ClientTunnelSettings clientSettings, long timeoutMs);
/**
* Called when a peer becomes unreachable - go through all of the current
* tunnels and rebuild them if we can, or drop them if we can't.
*
*/
void peerFailed(Hash peer);
/**
* True if the peer currently part of a tunnel
*
@ -70,4 +50,21 @@ public interface TunnelManagerFacade extends Service {
/** When does the last tunnel we are participating in expire? */
public long getLastParticipatingExpiration();
/**
* the client connected (or updated their settings), so make sure we have
* the tunnels for them, and whenever necessary, ask them to authorize
* leases.
*
*/
public void buildTunnels(Destination client, ClientTunnelSettings settings);
public TunnelPoolSettings getInboundSettings();
public TunnelPoolSettings getOutboundSettings();
public TunnelPoolSettings getInboundSettings(Hash client);
public TunnelPoolSettings getOutboundSettings(Hash client);
public void setInboundSettings(TunnelPoolSettings settings);
public void setOutboundSettings(TunnelPoolSettings settings);
public void setInboundSettings(Hash client, TunnelPoolSettings settings);
public void setOutboundSettings(Hash client, TunnelPoolSettings settings);
}

View File

@ -343,7 +343,9 @@ public class ClientConnectionRunner {
*/
void requestLeaseSet(LeaseSet set, long expirationTime, Job onCreateJob, Job onFailedJob) {
if (_dead) return;
_context.jobQueue().addJob(new RequestLeaseSetJob(_context, this, set, expirationTime, onCreateJob, onFailedJob));
if ( (_currentLeaseSet != null) && (_currentLeaseSet.equals(set)) )
return; // no change
_context.jobQueue().addJob(new RequestLeaseSetJob(_context, this, set, _context.clock().now() + expirationTime, onCreateJob, onFailedJob));
}
void disconnected() {

View File

@ -222,7 +222,15 @@ public class ClientManager {
runner.requestLeaseSet(set, _ctx.clock().now() + timeout, onCreateJob, onFailedJob);
}
}
private static final int REQUEST_LEASESET_TIMEOUT = 20*1000;
public void requestLeaseSet(Hash dest, LeaseSet ls) {
ClientConnectionRunner runner = getRunner(dest);
if (runner != null) {
// no need to fire off any jobs...
runner.requestLeaseSet(ls, REQUEST_LEASESET_TIMEOUT, null, null);
}
}
public boolean isLocal(Destination dest) {
boolean rv = false;
@ -261,6 +269,15 @@ public class ClientManager {
return false;
}
public Set listClients() {
Set rv = new HashSet();
synchronized (_runners) {
rv.addAll(_runners.keySet());
}
return rv;
}
ClientConnectionRunner getRunner(Destination dest) {
ClientConnectionRunner rv = null;
long beforeLock = _ctx.clock().now();

View File

@ -10,7 +10,9 @@ package net.i2p.router.client;
import java.io.IOException;
import java.io.Writer;
import java.util.Collections;
import java.util.Iterator;
import java.util.Set;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
@ -112,6 +114,12 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
_log.error("Null manager on requestLeaseSet!");
}
public void requestLeaseSet(Hash dest, LeaseSet set) {
if (_manager != null)
_manager.requestLeaseSet(dest, set);
}
/**
* Instruct the client (or all clients) that they are under attack. This call
* does not block.
@ -186,4 +194,16 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
if (_manager != null)
_manager.renderStatusHTML(out);
}
/**
* Return the list of locally connected clients
*
* @return set of Destination objects
*/
public Set listClients() {
if (_manager != null)
return _manager.listClients();
else
return Collections.EMPTY_SET;
}
}

View File

@ -78,7 +78,8 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
handleDestroySession(reader, (DestroySessionMessage)message);
break;
default:
_log.warn("Unhandled I2CP type received: " + message.getType());
if (_log.shouldLog(Log.ERROR))
_log.error("Unhandled I2CP type received: " + message.getType());
}
}
@ -88,7 +89,8 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
*/
public void readError(I2CPMessageReader reader, Exception error) {
if (_runner.isDead()) return;
_log.error("Error occurred", error);
if (_log.shouldLog(Log.ERROR))
_log.error("Error occurred", error);
_runner.stopRunning();
}
@ -101,7 +103,8 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
try {
_runner.doSend(new SetDateMessage());
} catch (I2CPMessageException ime) {
_log.error("Error writing out the setDate message", ime);
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the setDate message", ime);
}
}
private void handleSetDate(I2CPMessageReader reader, SetDateMessage message) {
@ -118,7 +121,8 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
if (_log.shouldLog(Log.DEBUG))
_log.debug("Signature verified correctly on create session message");
} else {
_log.error("Signature verification *FAILED* on a create session message. Hijack attempt?");
if (_log.shouldLog(Log.ERROR))
_log.error("Signature verification *FAILED* on a create session message. Hijack attempt?");
_runner.disconnectClient("Invalid signature on CreateSessionMessage");
return;
}
@ -152,12 +156,13 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
*
*/
private void handleSendMessage(I2CPMessageReader reader, SendMessageMessage message) {
_log.debug("handleSendMessage called");
if (_log.shouldLog(Log.DEBUG))
_log.debug("handleSendMessage called");
long beforeDistribute = _context.clock().now();
MessageId id = _runner.distributeMessage(message);
long timeToDistribute = _context.clock().now() - beforeDistribute;
_runner.ackSendMessage(id, message.getNonce());
if (timeToDistribute > 50)
if ( (timeToDistribute > 50) && (_log.shouldLog(Log.WARN)) )
_log.warn("Took too long to distribute the message (which holds up the ack): " + timeToDistribute);
}
@ -168,14 +173,16 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
*/
private void handleReceiveBegin(I2CPMessageReader reader, ReceiveMessageBeginMessage message) {
if (_runner.isDead()) return;
_log.debug("Handling recieve begin: id = " + message.getMessageId());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Handling recieve begin: id = " + message.getMessageId());
MessagePayloadMessage msg = new MessagePayloadMessage();
msg.setMessageId(message.getMessageId());
msg.setSessionId(_runner.getSessionId());
Payload payload = _runner.getPayload(message.getMessageId());
if (payload == null) {
_log.error("Payload for message id [" + message.getMessageId()
+ "] is null! Unknown message id?");
if (_log.shouldLog(Log.ERROR))
_log.error("Payload for message id [" + message.getMessageId()
+ "] is null! Unknown message id?");
return;
}
msg.setPayload(payload);
@ -197,17 +204,21 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
}
private void handleDestroySession(I2CPMessageReader reader, DestroySessionMessage message) {
_log.info("Destroying client session " + _runner.getSessionId());
if (_log.shouldLog(Log.INFO))
_log.info("Destroying client session " + _runner.getSessionId());
_runner.stopRunning();
}
private void handleCreateLeaseSet(I2CPMessageReader reader, CreateLeaseSetMessage message) {
if ( (message.getLeaseSet() == null) || (message.getPrivateKey() == null) || (message.getSigningPrivateKey() == null) ) {
_log.error("Null lease set granted: " + message);
if (_log.shouldLog(Log.ERROR))
_log.error("Null lease set granted: " + message);
return;
}
_log.info("New lease set granted for destination " + message.getLeaseSet().getDestination().calculateHash().toBase64());
if (_log.shouldLog(Log.INFO))
_log.info("New lease set granted for destination "
+ message.getLeaseSet().getDestination().calculateHash().toBase64());
_context.keyManager().registerKeys(message.getLeaseSet().getDestination(), message.getSigningPrivateKey(), message.getPrivateKey());
_context.netDb().publish(message.getLeaseSet());

View File

@ -27,8 +27,6 @@ class CreateSessionJob extends JobImpl {
private Log _log;
private ClientConnectionRunner _runner;
private final static long LEASE_CREATION_TIMEOUT = 30*1000;
public CreateSessionJob(RouterContext context, ClientConnectionRunner runner) {
super(context);
_log = context.logManager().getLog(CreateSessionJob.class);
@ -65,6 +63,6 @@ class CreateSessionJob extends JobImpl {
// and load 'em up (using anything not yet set as the software defaults)
settings.readFromProperties(props);
getContext().tunnelManager().createTunnels(_runner.getConfig().getDestination(), settings, LEASE_CREATION_TIMEOUT);
getContext().tunnelManager().buildTunnels(_runner.getConfig().getDestination(), settings);
}
}

View File

@ -52,7 +52,9 @@ class MessageReceivedJob extends JobImpl {
*
*/
public void messageAvailable(MessageId id, long size) {
_log.debug("Sending message available: " + id + " to sessionId " + _runner.getSessionId() + " (with nonce=1)", new Exception("available"));
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending message available: " + id + " to sessionId " + _runner.getSessionId()
+ " (with nonce=1)", new Exception("available"));
MessageStatusMessage msg = new MessageStatusMessage();
msg.setMessageId(id);
msg.setSessionId(_runner.getSessionId());
@ -62,7 +64,8 @@ class MessageReceivedJob extends JobImpl {
try {
_runner.doSend(msg);
} catch (I2CPMessageException ime) {
_log.error("Error writing out the message status message", ime);
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the message status message", ime);
}
}
}

View File

@ -49,7 +49,8 @@ class RequestLeaseSetJob extends JobImpl {
LeaseRequestState oldReq = _runner.getLeaseRequest();
if (oldReq != null) {
if (oldReq.getExpiration() > getContext().clock().now()) {
_log.error("Old *current* leaseRequest already exists! Why are we trying to request too quickly?", getAddedBy());
_log.info("request of a leaseSet is still active, wait a little bit before asking again");
requeue(5*1000);
return;
} else {
_log.error("Old *expired* leaseRequest exists! Why did the old request not get killed? (expiration = " + new Date(oldReq.getExpiration()) + ")", getAddedBy());
@ -70,7 +71,7 @@ class RequestLeaseSetJob extends JobImpl {
msg.setSessionId(_runner.getSessionId());
for (int i = 0; i < state.getRequested().getLeaseCount(); i++) {
msg.addEndpoint(state.getRequested().getLease(i).getRouterIdentity(), state.getRequested().getLease(i).getTunnelId());
msg.addEndpoint(state.getRequested().getLease(i).getGateway(), state.getRequested().getLease(i).getTunnelId());
}
try {

View File

@ -141,7 +141,7 @@ public class BuildTestMessageJob extends JobImpl {
ackInstructions.setEncrypted(false);
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
msg.setArrival(new Date(getContext().clock().now()));
msg.setArrival(getContext().clock().now());
msg.setMessageId(_testMessageKey);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Delivery status message key: " + _testMessageKey + " arrival: " + msg.getArrival());

View File

@ -107,8 +107,7 @@ public class GarlicMessageBuilder {
byte encData[] = ctx.elGamalAESEngine().encrypt(cloveSet, target, encryptKey, wrappedTags, encryptTag, 128);
msg.setData(encData);
Date exp = new Date(config.getExpiration());
msg.setMessageExpiration(exp);
msg.setMessageExpiration(config.getExpiration());
if (log.shouldLog(Log.WARN))
log.warn("CloveSet size for message " + msg.getUniqueId() + " is " + cloveSet.length
@ -133,33 +132,42 @@ public class GarlicMessageBuilder {
*
*/
private static byte[] buildCloveSet(RouterContext ctx, GarlicConfig config) {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
ByteArrayOutputStream baos = null;
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
try {
if (config instanceof PayloadGarlicConfig) {
byte clove[] = buildClove(ctx, (PayloadGarlicConfig)config);
baos = new ByteArrayOutputStream(clove.length + 16);
DataHelper.writeLong(baos, 1, 1);
baos.write(buildClove(ctx, (PayloadGarlicConfig)config));
baos.write(clove);
} else {
DataHelper.writeLong(baos, 1, config.getCloveCount());
byte cloves[][] = new byte[config.getCloveCount()][];
for (int i = 0; i < config.getCloveCount(); i++) {
GarlicConfig c = config.getClove(i);
byte clove[] = null;
if (c instanceof PayloadGarlicConfig) {
log.debug("Subclove IS a payload garlic clove");
clove = buildClove(ctx, (PayloadGarlicConfig)c);
cloves[i] = buildClove(ctx, (PayloadGarlicConfig)c);
} else {
log.debug("Subclove IS NOT a payload garlic clove");
clove = buildClove(ctx, c);
cloves[i] = buildClove(ctx, c);
}
if (clove == null)
if (cloves[i] == null)
throw new DataFormatException("Unable to build clove");
else
baos.write(clove);
}
int len = 1;
for (int i = 0; i < cloves.length; i++)
len += cloves[i].length;
baos = new ByteArrayOutputStream(len + 16);
DataHelper.writeLong(baos, 1, cloves.length);
for (int i = 0; i < cloves.length; i++)
baos.write(cloves[i]);
}
if (baos == null)
new ByteArrayOutputStream(16);
config.getCertificate().writeBytes(baos);
DataHelper.writeLong(baos, 4, config.getId());
DataHelper.writeDate(baos, new Date(config.getExpiration()));
DataHelper.writeLong(baos, DataHelper.DATE_LENGTH, config.getExpiration());
} catch (IOException ioe) {
log.error("Error building the clove set", ioe);
} catch (DataFormatException dfe) {
@ -189,7 +197,8 @@ public class GarlicMessageBuilder {
clove.setCloveId(config.getId());
clove.setExpiration(new Date(config.getExpiration()));
clove.setInstructions(config.getDeliveryInstructions());
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
int size = clove.estimateSize();
ByteArrayOutputStream baos = new ByteArrayOutputStream(size);
clove.writeBytes(baos);
return baos.toByteArray();
}

View File

@ -18,8 +18,11 @@ import java.util.Set;
import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.GarlicClove;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.LeaseSetKeys;
import net.i2p.router.Router;
@ -33,13 +36,13 @@ import net.i2p.util.Log;
* need to be. soon)
*
*/
public class HandleGarlicMessageJob extends JobImpl {
public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageReceiver.CloveReceiver {
private Log _log;
private GarlicMessage _message;
private RouterIdentity _from;
private Hash _fromHash;
private Map _cloves; // map of clove Id --> Expiration of cloves we've already seen
private MessageHandler _handler;
//private MessageHandler _handler;
private GarlicMessageParser _parser;
private final static int FORWARD_PRIORITY = 50;
@ -54,126 +57,56 @@ public class HandleGarlicMessageJob extends JobImpl {
_from = from;
_fromHash = fromHash;
_cloves = new HashMap();
_handler = new MessageHandler(context);
//_handler = new MessageHandler(context);
_parser = new GarlicMessageParser(context);
}
public String getName() { return "Handle Inbound Garlic Message"; }
public void runJob() {
CloveSet set = _parser.getGarlicCloves(_message, getContext().keyManager().getPrivateKey());
if (set == null) {
Set keys = getContext().keyManager().getAllKeys();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Decryption with the router's key failed, now try with the " + keys.size() + " leaseSet keys");
// our router key failed, which means that it was either encrypted wrong
// or it was encrypted to a LeaseSet's PublicKey
for (Iterator iter = keys.iterator(); iter.hasNext();) {
LeaseSetKeys lskeys = (LeaseSetKeys)iter.next();
set = _parser.getGarlicCloves(_message, lskeys.getDecryptionKey());
if (set != null) {
GarlicMessageReceiver recv = new GarlicMessageReceiver(getContext(), this);
recv.receive(_message);
}
public void handleClove(DeliveryInstructions instructions, I2NPMessage data) {
switch (instructions.getDeliveryMode()) {
case DeliveryInstructions.DELIVERY_MODE_LOCAL:
if (_log.shouldLog(Log.DEBUG))
_log.debug("local delivery instructions for clove: " + data);
getContext().inNetMessagePool().add(data, null, null);
return;
case DeliveryInstructions.DELIVERY_MODE_DESTINATION:
if (_log.shouldLog(Log.ERROR))
_log.error("this message didn't come down a tunnel, not forwarding to a destination: "
+ instructions + " - " + data);
return;
case DeliveryInstructions.DELIVERY_MODE_ROUTER:
if (getContext().routerHash().equals(instructions.getRouter())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Decrypted garlic message with lease set key for destination "
+ lskeys.getDestination().calculateHash().toBase64() + " SUCCEEDED: " + set);
break;
_log.debug("router delivery instructions targetting us");
getContext().inNetMessagePool().add(data, null, null);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Decrypting garlic message with lease set key for destination "
+ lskeys.getDestination().calculateHash().toBase64() + " failed");
_log.debug("router delivery instructions targetting "
+ instructions.getRouter().toBase64().substring(0,4));
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), data,
instructions.getRouter(),
10*1000, 100);
getContext().jobQueue().addJob(j);
}
}
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Decrypted clove set found " + set.getCloveCount() + " cloves: " + set);
return;
case DeliveryInstructions.DELIVERY_MODE_TUNNEL:
TunnelGatewayMessage gw = new TunnelGatewayMessage(getContext());
gw.setMessage(data);
gw.setTunnelId(instructions.getTunnelId());
gw.setMessageExpiration(data.getMessageExpiration());
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), gw,
instructions.getRouter(),
10*1000, 100));
return;
default:
_log.error("Unknown instruction " + instructions.getDeliveryMode() + ": " + instructions);
return;
}
if (set != null) {
for (int i = 0; i < set.getCloveCount(); i++) {
GarlicClove clove = set.getClove(i);
handleClove(clove);
}
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("CloveMessageParser failed to decrypt the message [" + _message.getUniqueId()
+ "] to us when received from [" + _fromHash + "] / [" + _from + "]",
new Exception("Decrypt garlic failed"));
getContext().statManager().addRateData("crypto.garlic.decryptFail", 1, 0);
getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Garlic could not be decrypted");
}
}
private boolean isKnown(long cloveId) {
boolean known = false;
synchronized (_cloves) {
known = _cloves.containsKey(new Long(cloveId));
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("isKnown("+cloveId+"): " + known);
return known;
}
private void cleanupCloves() {
// this should be in its own thread perhaps? and maybe _cloves should be
// synced to disk?
List toRemove = new ArrayList(32);
long now = getContext().clock().now();
synchronized (_cloves) {
for (Iterator iter = _cloves.keySet().iterator(); iter.hasNext();) {
Long id = (Long)iter.next();
Date exp = (Date)_cloves.get(id);
if (exp == null) continue; // wtf, not sure how this can happen yet, but i've seen it. grr.
if (now > exp.getTime())
toRemove.add(id);
}
for (int i = 0; i < toRemove.size(); i++)
_cloves.remove(toRemove.get(i));
}
}
private boolean isValid(GarlicClove clove) {
if (isKnown(clove.getCloveId())) {
if (_log.shouldLog(Log.ERROR))
_log.error("Duplicate garlic clove received - replay attack in progress? [cloveId = "
+ clove.getCloveId() + " expiration = " + clove.getExpiration());
return false;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Clove " + clove.getCloveId() + " expiring on " + clove.getExpiration()
+ " is not known");
}
long now = getContext().clock().now();
if (clove.getExpiration().getTime() < now) {
if (clove.getExpiration().getTime() < now + Router.CLOCK_FUDGE_FACTOR) {
if (_log.shouldLog(Log.WARN))
_log.warn("Expired garlic received, but within our fudge factor ["
+ clove.getExpiration() + "]");
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Expired garlic clove received - replay attack in progress? [cloveId = "
+ clove.getCloveId() + " expiration = " + clove.getExpiration()
+ " now = " + (new Date(getContext().clock().now())));
return false;
}
}
synchronized (_cloves) {
_cloves.put(new Long(clove.getCloveId()), clove.getExpiration());
}
cleanupCloves();
return true;
}
private void handleClove(GarlicClove clove) {
if (!isValid(clove)) {
if (_log.shouldLog(Log.DEBUG))
_log.warn("Invalid clove " + clove);
return;
}
long sendExpiration = clove.getExpiration().getTime();
// if the clove targets something remote, tunnel route it
boolean sendDirect = false;
_handler.handleMessage(clove.getInstructions(), clove.getData(),
clove.getCloveId(), _from, _fromHash,
sendExpiration, FORWARD_PRIORITY, sendDirect);
}
public void dropped() {

View File

@ -52,38 +52,45 @@ class OutboundClientMessageJobHelper {
*
* @param bundledReplyLeaseSet if specified, the given LeaseSet will be packaged with the message (allowing
* much faster replies, since their netDb search will return almost instantly)
* @return garlic, or null if no tunnels were found (or other errors)
*/
static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK,
Payload data, Destination dest, SessionKey wrappedKey, Set wrappedTags,
Payload data, Hash from, Destination dest, SessionKey wrappedKey, Set wrappedTags,
boolean requireAck, LeaseSet bundledReplyLeaseSet) {
PayloadGarlicConfig dataClove = buildDataClove(ctx, data, dest, expiration);
return createGarlicMessage(ctx, replyToken, expiration, recipientPK, dataClove, dest, wrappedKey,
return createGarlicMessage(ctx, replyToken, expiration, recipientPK, dataClove, from, dest, wrappedKey,
wrappedTags, requireAck, bundledReplyLeaseSet);
}
/**
* Allow the app to specify the data clove directly, which enables OutboundClientMessage to resend the
* same payload (including expiration and unique id) in different garlics (down different tunnels)
*
* @return garlic, or null if no tunnels were found (or other errors)
*/
static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK,
PayloadGarlicConfig dataClove, Destination dest, SessionKey wrappedKey,
PayloadGarlicConfig dataClove, Hash from, Destination dest, SessionKey wrappedKey,
Set wrappedTags, boolean requireAck, LeaseSet bundledReplyLeaseSet) {
GarlicConfig config = createGarlicConfig(ctx, replyToken, expiration, recipientPK, dataClove, dest, requireAck, bundledReplyLeaseSet);
GarlicConfig config = createGarlicConfig(ctx, replyToken, expiration, recipientPK, dataClove, from, dest, requireAck, bundledReplyLeaseSet);
if (config == null)
return null;
GarlicMessage msg = GarlicMessageBuilder.buildMessage(ctx, config, wrappedKey, wrappedTags);
return msg;
}
private static GarlicConfig createGarlicConfig(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK,
PayloadGarlicConfig dataClove, Destination dest, boolean requireAck,
PayloadGarlicConfig dataClove, Hash from, Destination dest, boolean requireAck,
LeaseSet bundledReplyLeaseSet) {
Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class);
log.debug("Reply token: " + replyToken);
if (log.shouldLog(Log.DEBUG))
log.debug("Reply token: " + replyToken);
GarlicConfig config = new GarlicConfig();
config.addClove(dataClove);
if (requireAck) {
PayloadGarlicConfig ackClove = buildAckClove(ctx, replyToken, expiration);
PayloadGarlicConfig ackClove = buildAckClove(ctx, from, replyToken, expiration);
if (ackClove == null)
return null; // no tunnels
config.addClove(ackClove);
}
@ -108,7 +115,9 @@ class OutboundClientMessageJobHelper {
config.setRecipientPublicKey(recipientPK);
config.setRequestAck(false);
log.info("Creating garlic config to be encrypted to " + recipientPK + " for destination " + dest.calculateHash().toBase64());
if (log.shouldLog(Log.INFO))
log.info("Creating garlic config to be encrypted to " + recipientPK
+ " for destination " + dest.calculateHash().toBase64());
return config;
}
@ -116,28 +125,25 @@ class OutboundClientMessageJobHelper {
/**
* Build a clove that sends a DeliveryStatusMessage to us
*/
private static PayloadGarlicConfig buildAckClove(RouterContext ctx, long replyToken, long expiration) {
private static PayloadGarlicConfig buildAckClove(RouterContext ctx, Hash from, long replyToken, long expiration) {
Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class);
PayloadGarlicConfig ackClove = new PayloadGarlicConfig();
Hash replyToTunnelRouter = null; // inbound tunnel gateway
TunnelId replyToTunnelId = null; // tunnel id on that gateway
TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
criteria.setMaximumTunnelsRequired(1);
criteria.setMinimumTunnelsRequired(1);
criteria.setReliabilityPriority(50); // arbitrary. fixme
criteria.setAnonymityPriority(50); // arbitrary. fixme
criteria.setLatencyPriority(50); // arbitrary. fixme
List tunnelIds = ctx.tunnelManager().selectInboundTunnelIds(criteria);
if (tunnelIds.size() <= 0) {
log.error("No inbound tunnels to receive an ack through!?");
TunnelInfo replyToTunnel = ctx.tunnelManager().selectInboundTunnel(from);
if (replyToTunnel == null) {
if (log.shouldLog(Log.ERROR))
log.error("Unable to send client message from " + from.toBase64()
+ ", as there are no inbound tunnels available");
return null;
}
replyToTunnelId = (TunnelId)tunnelIds.get(0);
TunnelInfo info = ctx.tunnelManager().getTunnelInfo(replyToTunnelId);
replyToTunnelRouter = info.getThisHop(); // info is the chain, and the first hop is the gateway
log.debug("Ack for the data message will come back along tunnel " + replyToTunnelId + ":\n" + info);
replyToTunnelId = replyToTunnel.getReceiveTunnelId(0);
replyToTunnelRouter = replyToTunnel.getPeer(0);
if (log.shouldLog(Log.DEBUG))
log.debug("Ack for the data message will come back along tunnel " + replyToTunnelId
+ ":\n" + replyToTunnel);
DeliveryInstructions ackInstructions = new DeliveryInstructions();
ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL);
@ -148,9 +154,10 @@ class OutboundClientMessageJobHelper {
ackInstructions.setEncrypted(false);
DeliveryStatusMessage msg = new DeliveryStatusMessage(ctx);
msg.setArrival(new Date(ctx.clock().now()));
msg.setArrival(ctx.clock().now());
msg.setMessageId(replyToken);
log.debug("Delivery status message key: " + replyToken + " arrival: " + msg.getArrival());
if (log.shouldLog(Log.DEBUG))
log.debug("Delivery status message key: " + replyToken + " arrival: " + msg.getArrival());
ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
ackClove.setDeliveryInstructions(ackInstructions);
@ -160,7 +167,11 @@ class OutboundClientMessageJobHelper {
ackClove.setRecipient(ctx.router().getRouterInfo());
ackClove.setRequestAck(false);
log.debug("Delivery status message is targetting us [" + ackClove.getRecipient().getIdentity().getHash().toBase64() + "] via tunnel " + replyToTunnelId.getTunnelId() + " on " + replyToTunnelRouter.toBase64());
if (log.shouldLog(Log.DEBUG))
log.debug("Delivery status message is targetting us ["
+ ackClove.getRecipient().getIdentity().getHash().toBase64()
+ "] via tunnel " + replyToTunnelId.getTunnelId() + " on "
+ replyToTunnelRouter.toBase64());
return ackClove;
}
@ -211,7 +222,7 @@ class OutboundClientMessageJobHelper {
clove.setId(ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE));
DatabaseStoreMessage msg = new DatabaseStoreMessage(ctx);
msg.setLeaseSet(replyLeaseSet);
msg.setMessageExpiration(new Date(expiration));
msg.setMessageExpiration(expiration);
msg.setKey(replyLeaseSet.getDestination().calculateHash());
clove.setPayload(msg);
clove.setRecipientPublicKey(null);

View File

@ -34,7 +34,7 @@ import net.i2p.router.JobImpl;
import net.i2p.router.ReplyJob;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.router.TunnelInfo;
import net.i2p.router.MessageSelector;
import net.i2p.util.Log;
@ -147,9 +147,16 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Clove built");
long timeoutMs = _overallExpiration - getContext().clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": preparing to search for the leaseSet");
Hash key = _to.calculateHash();
SendJob success = new SendJob(getContext());
LookupLeaseSetFailedJob failed = new LookupLeaseSetFailedJob(getContext());
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Send outbound client message - sending off leaseSet lookup job");
getContext().netDb().lookupLeaseSet(_to.calculateHash(), new SendJob(getContext()), new LookupLeaseSetFailedJob(getContext()), timeoutMs);
getContext().netDb().lookupLeaseSet(key, success, failed, timeoutMs);
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": after sending off leaseSet lookup job");
}
private boolean getShouldBundle() {
@ -281,10 +288,17 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(getContext(), token,
_overallExpiration, key,
_clove,
_clove, _from.calculateHash(),
_to,
sessKey, tags,
true, replyLeaseSet);
if (msg == null) {
// set to null if there are no tunnels to ack the reply back through
// (should we always fail for this? or should we send it anyway, even if
// we dont receive the reply? hmm...)
dieFatal();
return;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": send() - token expected " + token);
@ -296,21 +310,17 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Placing GarlicMessage into the new tunnel message bound for "
+ _lease.getTunnelId() + " on "
+ _lease.getRouterIdentity().getHash().toBase64());
+ _lease.getGateway().toBase64());
TunnelId outTunnelId = selectOutboundTunnel();
if (outTunnelId != null) {
TunnelInfo outTunnel = selectOutboundTunnel();
if (outTunnel != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Sending tunnel message out " + outTunnelId + " to "
_log.debug(getJobId() + ": Sending tunnel message out " + outTunnel.getSendTunnelId(0) + " to "
+ _lease.getTunnelId() + " on "
+ _lease.getRouterIdentity().getHash().toBase64());
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId,
_lease.getRouterIdentity().getHash(),
_lease.getTunnelId(), null, onReply,
onFail, selector,
_overallExpiration-getContext().clock().now(),
SEND_PRIORITY);
getContext().jobQueue().addJob(j);
+ _lease.getGateway().toBase64());
// dispatch may take 100+ms, so toss it in its own job
getContext().jobQueue().addJob(new DispatchJob(getContext(), msg, outTunnel, selector, onReply, onFail, (int)(_overallExpiration-getContext().clock().now())));
} else {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": Could not find any outbound tunnels to send the payload through... wtf?");
@ -319,21 +329,38 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
_clientMessage = null;
_clove = null;
}
private class DispatchJob extends JobImpl {
private GarlicMessage _msg;
private TunnelInfo _outTunnel;
private ReplySelector _selector;
private SendSuccessJob _replyFound;
private SendTimeoutJob _replyTimeout;
private int _timeoutMs;
public DispatchJob(RouterContext ctx, GarlicMessage msg, TunnelInfo out, ReplySelector sel, SendSuccessJob success, SendTimeoutJob timeout, int timeoutMs) {
super(ctx);
_msg = msg;
_outTunnel = out;
_selector = sel;
_replyFound = success;
_replyTimeout = timeout;
_timeoutMs = timeoutMs;
}
public String getName() { return "Dispatch outbound client message"; }
public void runJob() {
getContext().messageRegistry().registerPending(_selector, _replyFound, _replyTimeout, _timeoutMs);
getContext().tunnelDispatcher().dispatchOutbound(_msg, _outTunnel.getSendTunnelId(0), _lease.getTunnelId(), _lease.getGateway());
}
}
/**
* Pick an arbitrary outbound tunnel to send the message through, or null if
* there aren't any around
*
*/
private TunnelId selectOutboundTunnel() {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);
if (tunnelIds.size() <= 0)
return null;
else
return (TunnelId)tunnelIds.get(0);
private TunnelInfo selectOutboundTunnel() {
return getContext().tunnelManager().selectOutboundTunnel(_from.calculateHash());
}
/**
@ -405,13 +432,24 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
private long _pendingToken;
public ReplySelector(long token) {
_pendingToken = token;
if (_log.shouldLog(Log.INFO))
_log.info(OutboundClientMessageOneShotJob.this.getJobId()
+ "Reply selector for client message: token=" + token);
}
public boolean continueMatching() { return false; }
public boolean continueMatching() {
if (_log.shouldLog(Log.DEBUG))
_log.debug(OutboundClientMessageOneShotJob.this.getJobId()
+ "dont continue matching for token=" + _pendingToken);
return false;
}
public long getExpiration() { return _overallExpiration; }
public boolean isMatch(I2NPMessage inMsg) {
if (inMsg.getType() == DeliveryStatusMessage.MESSAGE_TYPE) {
if (_log.shouldLog(Log.INFO))
_log.info(OutboundClientMessageOneShotJob.this.getJobId()
+ "delivery status message received: " + inMsg + " our token: " + _pendingToken);
return _pendingToken == ((DeliveryStatusMessage)inMsg).getMessageId();
} else {
return false;
@ -439,7 +477,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
_tags = tags;
}
public String getName() { return "Send client message successful to a lease"; }
public String getName() { return "Send client message successful"; }
public void runJob() {
if (_finished) return;
_finished = true;
@ -477,7 +515,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
super(enclosingContext);
}
public String getName() { return "Send client message timed out through a lease"; }
public String getName() { return "Send client message timed out"; }
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug(OutboundClientMessageOneShotJob.this.getJobId()

View File

@ -80,9 +80,11 @@ public class SendGarlicJob extends JobImpl {
_message = GarlicMessageBuilder.buildMessage(getContext(), _config, _wrappedKey, _wrappedTags);
long after = getContext().clock().now();
if ( (after - before) > 1000) {
_log.warn("Building the garlic took too long [" + (after-before)+" ms]", getAddedBy());
if (_log.shouldLog(Log.WARN))
_log.warn("Building the garlic took too long [" + (after-before)+" ms]", getAddedBy());
} else {
_log.debug("Building the garlic was fast! " + (after - before) + " ms");
if (_log.shouldLog(Log.DEBUG))
_log.debug("Building the garlic was fast! " + (after - before) + " ms");
}
getContext().jobQueue().addJob(new SendJob(getContext()));
}
@ -103,7 +105,7 @@ public class SendGarlicJob extends JobImpl {
private void sendGarlic() {
OutNetMessage msg = new OutNetMessage(getContext());
long when = _message.getMessageExpiration().getTime(); // + Router.CLOCK_FUDGE_FACTOR;
long when = _message.getMessageExpiration(); // + Router.CLOCK_FUDGE_FACTOR;
msg.setExpiration(when);
msg.setMessage(_message);
msg.setOnFailedReplyJob(_onReplyFailed);

View File

@ -13,7 +13,6 @@ import java.util.Date;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.InNetMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageSelector;
@ -141,10 +140,7 @@ public class SendMessageDirectJob extends JobImpl {
if (_onSend != null)
getContext().jobQueue().addJob(_onSend);
InNetMessage msg = new InNetMessage(getContext());
msg.setFromRouter(_router.getIdentity());
msg.setMessage(_message);
getContext().inNetMessagePool().add(msg);
getContext().inNetMessagePool().add(_message, _router.getIdentity(), null);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Adding " + _message.getClass().getName()

View File

@ -25,13 +25,12 @@ import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.message.SendMessageDirectJob;
import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.util.Log;
/**
@ -158,17 +157,21 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
}
private void sendThroughTunnel(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) {
TunnelInfo info = getContext().tunnelManager().getTunnelInfo(replyTunnel);
// the sendTunnelMessageJob can't handle injecting into the tunnel anywhere but the beginning
// (and if we are the beginning, we have the signing key)
if ( (info == null) || (info.getSigningKey() != null)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending reply through " + replyTunnel + " on " + toPeer);
getContext().jobQueue().addJob(new SendTunnelMessageJob(getContext(), message, replyTunnel, toPeer, null, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
if (getContext().routerHash().equals(toPeer)) {
// if we are the gateway, act as if we received it
TunnelGatewayMessage m = new TunnelGatewayMessage(getContext());
m.setMessage(message);
m.setTunnelId(replyTunnel);
m.setMessageExpiration(message.getMessageExpiration());
getContext().tunnelDispatcher().dispatch(m);
} else {
// its a tunnel we're participating in, but we're NOT the gateway, so
sendToGateway(message, toPeer, replyTunnel, info);
// if we aren't the gateway, forward it on
TunnelGatewayMessage m = new TunnelGatewayMessage(getContext());
m.setMessage(message);
m.setMessageExpiration(message.getMessageExpiration());
m.setTunnelId(replyTunnel);
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), m, toPeer, 10*1000, 100);
getContext().jobQueue().addJob(j);
}
}
@ -184,14 +187,14 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
long expiration = REPLY_TIMEOUT + getContext().clock().now();
TunnelMessage msg = new TunnelMessage(getContext());
msg.setData(message.toByteArray());
TunnelGatewayMessage msg = new TunnelGatewayMessage(getContext());
msg.setMessage(message);
msg.setTunnelId(replyTunnel);
msg.setMessageExpiration(new Date(expiration));
msg.setMessageExpiration(expiration);
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, toPeer, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
String bodyType = message.getClass().getName();
getContext().messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
getContext().messageHistory().wrap(bodyType, message.getUniqueId(), TunnelGatewayMessage.class.getName(), msg.getUniqueId());
}
public String getName() { return "Handle Database Lookup Message"; }

View File

@ -18,8 +18,7 @@ import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
@ -32,7 +31,7 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
private RouterIdentity _from;
private Hash _fromHash;
private static final long ACK_TIMEOUT = 15*1000;
private static final int ACK_TIMEOUT = 15*1000;
private static final int ACK_PRIORITY = 100;
public HandleDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash) {
@ -93,33 +92,19 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
private void sendAck() {
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
msg.setMessageId(_message.getReplyToken());
msg.setArrival(new Date(getContext().clock().now()));
TunnelId outTunnelId = selectOutboundTunnel();
if (outTunnelId == null) {
msg.setArrival(getContext().clock().now());
TunnelInfo outTunnel = selectOutboundTunnel();
if (outTunnel == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No outbound tunnel could be found");
return;
} else {
getContext().jobQueue().addJob(new SendTunnelMessageJob(getContext(), msg, outTunnelId,
_message.getReplyGateway(), _message.getReplyTunnel(),
null, null, null, null, ACK_TIMEOUT, ACK_PRIORITY));
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0), _message.getReplyTunnel(), _message.getReplyGateway());
}
}
private TunnelId selectOutboundTunnel() {
TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
criteria.setAnonymityPriority(80);
criteria.setLatencyPriority(50);
criteria.setReliabilityPriority(20);
criteria.setMaximumTunnelsRequired(1);
criteria.setMinimumTunnelsRequired(1);
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(criteria);
if (tunnelIds.size() <= 0) {
_log.error("No outbound tunnels?!");
return null;
} else {
return (TunnelId)tunnelIds.get(0);
}
private TunnelInfo selectOutboundTunnel() {
return getContext().tunnelManager().selectOutboundTunnel();
}
public String getName() { return "Handle Database Store Message"; }

View File

@ -16,6 +16,7 @@ import net.i2p.data.RouterInfo;
import net.i2p.data.SigningPrivateKey;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.Router;
import net.i2p.util.Log;
/**
@ -38,6 +39,7 @@ public class PublishLocalRouterInfoJob extends JobImpl {
_log.debug("Old routerInfo contains " + ri.getAddresses().size()
+ " addresses and " + ri.getOptions().size() + " options");
Properties stats = getContext().statPublisher().publishStatistics();
stats.setProperty(RouterInfo.PROP_NETWORK_ID, ""+Router.NETWORK_ID);
try {
ri.setPublished(getContext().clock().now());
ri.setOptions(stats);

View File

@ -72,7 +72,7 @@ class ExploreJob extends SearchJob {
msg.setSearchKey(getState().getTarget());
msg.setFrom(replyGateway.getIdentity().getHash());
msg.setDontIncludePeers(getState().getAttempted());
msg.setMessageExpiration(new Date(expiration));
msg.setMessageExpiration(expiration);
msg.setReplyTunnel(replyTunnelId);
Set attempted = getState().getAttempted();

View File

@ -110,7 +110,7 @@ class HarvesterJob extends JobImpl {
long now = getContext().clock().now();
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
msg.setFrom(getContext().routerHash());
msg.setMessageExpiration(new Date(10*1000+now));
msg.setMessageExpiration(10*1000+now);
msg.setSearchKey(peer);
msg.setReplyTunnel(null);
SendMessageDirectJob job = new SendMessageDirectJob(getContext(), msg, peer, 10*1000, PRIORITY);

View File

@ -75,9 +75,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
private Set _publishingLeaseSets;
/**
* Hash of the key currently being searched for, pointing at a List of
* DeferredSearchJob elements for each additional request waiting for that
* search to complete.
* Hash of the key currently being searched for, pointing the SearchJob that
* is currently operating. Subsequent requests for that same key are simply
* added on to the list of jobs fired on success/failure
*
*/
private Map _activeRequests;
@ -87,72 +87,14 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
*
*/
void searchComplete(Hash key) {
List deferred = null;
if (_log.shouldLog(Log.DEBUG))
_log.debug("search Complete: " + key);
SearchJob removed = null;
synchronized (_activeRequests) {
deferred = (List)_activeRequests.remove(key);
}
if (deferred != null) {
for (int i = 0; i < deferred.size(); i++) {
DeferredSearchJob j = (DeferredSearchJob)deferred.get(i);
_context.jobQueue().addJob(j);
}
removed = (SearchJob)_activeRequests.remove(key);
}
}
/**
* We want to search for a given key, but since there is already a job
* out searching for it, we can just sit back and wait for them to finish.
* Perhaps we should also queue up a 'wakeup' job, in case that already
* active search won't expire/complete until after we time out? Though in
* practice, pretty much all of the searches are the same duration...
*
* Anyway, this job is fired when that already active search completes -
* successfully or not - and either fires off the success task (or the fail
* task if we have expired), or it runs up its own search.
*
*/
private class DeferredSearchJob extends JobImpl {
private Hash _key;
private Job _onFind;
private Job _onFailed;
private long _expiration;
private boolean _isLease;
public DeferredSearchJob(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
super(KademliaNetworkDatabaseFacade.this._context);
_key = key;
_onFind = onFindJob;
_onFailed = onFailedLookupJob;
_isLease = isLease;
_expiration = getContext().clock().now() + timeoutMs;
}
public String getName() { return "Execute deferred search"; }
public void runJob() {
long remaining = getContext().clock().now() - _expiration;
if (remaining <= 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("Deferred search for " + _key.toBase64() + " expired prior to sending");
if (_onFailed != null)
getContext().jobQueue().addJob(_onFailed);
} else {
// ok, didn't time out - either we have the key or we can search
// for it
LeaseSet ls = lookupLeaseSetLocally(_key);
if (ls == null) {
RouterInfo ri = lookupRouterInfoLocally(_key);
if (ri == null) {
search(_key, _onFind, _onFailed, remaining, _isLease);
} else {
if (_onFind != null)
getContext().jobQueue().addJob(_onFind);
}
} else {
if (_onFind != null)
getContext().jobQueue().addJob(_onFind);
}
}
}
}
/**
@ -165,6 +107,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
/** don't probe or broadcast data, just respond and search when explicitly needed */
private boolean _quiet = false;
public static final String PROP_ENFORCE_NETID = "router.networkDatabase.enforceNetId";
private static final boolean DEFAULT_ENFORCE_NETID = false;
private boolean _enforceNetId = DEFAULT_ENFORCE_NETID;
public final static String PROP_DB_DIR = "router.networkDatabase.dbDir";
public final static String DEFAULT_DB_DIR = "netDb";
@ -185,6 +131,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_publishingLeaseSets = new HashSet(8);
_lastExploreNew = 0;
_activeRequests = new HashMap(8);
_enforceNetId = DEFAULT_ENFORCE_NETID;
}
KBucketSet getKBuckets() { return _kb; }
@ -280,6 +227,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_log.info("No DB dir specified [" + PROP_DB_DIR + "], using [" + DEFAULT_DB_DIR + "]");
_dbDir = DEFAULT_DB_DIR;
}
String enforce = _context.getProperty(PROP_ENFORCE_NETID);
if (enforce != null)
_enforceNetId = Boolean.valueOf(enforce).booleanValue();
else
_enforceNetId = DEFAULT_ENFORCE_NETID;
_ds.restart();
synchronized (_explicitSendKeys) { _explicitSendKeys.clear(); }
synchronized (_exploreKeys) { _exploreKeys.clear(); }
@ -301,6 +253,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_log.info("No DB dir specified [" + PROP_DB_DIR + "], using [" + DEFAULT_DB_DIR + "]");
dbDir = DEFAULT_DB_DIR;
}
String enforce = _context.getProperty(PROP_ENFORCE_NETID);
if (enforce != null)
_enforceNetId = Boolean.valueOf(enforce).booleanValue();
else
_enforceNetId = DEFAULT_ENFORCE_NETID;
_kb = new KBucketSet(_context, ri.getIdentity().getHash());
_ds = new PersistentDataStore(_context, dbDir, this);
@ -406,11 +363,17 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
if (!_initialized) return;
LeaseSet ls = lookupLeaseSetLocally(key);
if (ls != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("leaseSet found locally, firing " + onFindJob);
if (onFindJob != null)
_context.jobQueue().addJob(onFindJob);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("leaseSet not found locally, running search");
search(key, onFindJob, onFailedLookupJob, timeoutMs, true);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("after lookupLeaseSet");
}
public LeaseSet lookupLeaseSetLocally(Hash key) {
@ -647,6 +610,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_log.warn("Peer " + key.toBase64() + " published their routerInfo in the future?! ["
+ new Date(routerInfo.getPublished()) + "]", new Exception("Rejecting store"));
return "Peer " + key.toBase64() + " published " + DataHelper.formatDuration(age) + " in the future?!";
} else if (_enforceNetId && (routerInfo.getNetworkId() != Router.NETWORK_ID) ){
String rv = "Peer " + key.toBase64() + " is from another network, not accepting it (id="
+ routerInfo.getNetworkId() + ", want " + Router.NETWORK_ID + ")";
return rv;
}
return null;
}
@ -764,28 +731,28 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* without any match)
*
*/
private void search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
void search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
if (!_initialized) return;
int pendingRequests = 0;
boolean allowSearch = false;
boolean isNew = true;
SearchJob searchJob = null;
synchronized (_activeRequests) {
List pending = (List)_activeRequests.get(key);
if (pending == null) {
_activeRequests.put(key, new ArrayList(0));
allowSearch = true;
searchJob = (SearchJob)_activeRequests.get(key);
if (searchJob == null) {
searchJob = new SearchJob(_context, this, key, onFindJob, onFailedLookupJob,
timeoutMs, true, isLease);
_activeRequests.put(key, searchJob);
} else {
pending.add(new DeferredSearchJob(key, onFindJob, onFailedLookupJob, timeoutMs, isLease));
pendingRequests = pending.size();
allowSearch = false;
isNew = false;
}
}
if (allowSearch) {
_context.jobQueue().addJob(new SearchJob(_context, this, key, onFindJob, onFailedLookupJob,
timeoutMs, true, isLease));
if (isNew) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("this is the first search for that key, fire off the SearchJob");
_context.jobQueue().addJob(searchJob);
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Deferring search for " + key.toBase64() + ": there are " + pendingRequests
+ " other concurrent requests for it");
_log.info("Deferring search for " + key.toBase64() + " with " + onFindJob);
searchJob.addDeferred(onFindJob, onFailedLookupJob, timeoutMs, isLease);
}
}
@ -839,7 +806,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
buf.append("Earliest expiration date was: <i>").append(DataHelper.formatDuration(0-exp)).append(" ago</i><br />\n");
for (int i = 0; i < ls.getLeaseCount(); i++) {
buf.append("Lease ").append(i).append(": gateway <i>");
buf.append(ls.getLease(i).getRouterIdentity().getHash().toBase64().substring(0,6));
buf.append(ls.getLease(i).getGateway().toBase64().substring(0,6));
buf.append("</i> tunnelId <i>").append(ls.getLease(i).getTunnelId().getTunnelId()).append("</i><br />\n");
}
buf.append("<hr />\n");

View File

@ -43,7 +43,7 @@ public class RepublishLeaseSetJob extends JobImpl {
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
_log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
} else {
getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, null, null, REPUBLISH_LEASESET_DELAY));
getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_DELAY));
}
} else {
_log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");
@ -60,4 +60,21 @@ public class RepublishLeaseSetJob extends JobImpl {
throw re;
}
}
private class OnSuccess extends JobImpl {
public OnSuccess(RouterContext ctx) { super(ctx); }
public String getName() { return "Publish leaseSet successful"; }
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("successful publishing of the leaseSet for " + _dest.toBase64());
}
}
private class OnFailure extends JobImpl {
public OnFailure(RouterContext ctx) { super(ctx); }
public String getName() { return "Publish leaseSet failed"; }
public void runJob() {
if (_log.shouldLog(Log.ERROR))
_log.error("FAILED publishing of the leaseSet for " + _dest.toBase64());
}
}
}

View File

@ -8,11 +8,13 @@ package net.i2p.router.networkdb.kademlia;
*
*/
import java.util.ArrayList;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import net.i2p.data.DataHelper;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
@ -25,7 +27,6 @@ import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.router.message.SendMessageDirectJob;
import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.util.Log;
@ -46,6 +47,9 @@ class SearchJob extends JobImpl {
private boolean _isLease;
private Job _pendingRequeueJob;
private PeerSelector _peerSelector;
private List _deferredSearches;
private boolean _deferredCleared;
private long _startedOn;
private static final int SEARCH_BREDTH = 3; // 3 peers at a time
private static final int SEARCH_PRIORITY = 400; // large because the search is probably for a real search
@ -80,7 +84,10 @@ class SearchJob extends JobImpl {
_timeoutMs = timeoutMs;
_keepStats = keepStats;
_isLease = isLease;
_deferredSearches = new ArrayList(0);
_deferredCleared = false;
_peerSelector = new PeerSelector(getContext());
_startedOn = -1;
_expiration = getContext().clock().now() + timeoutMs;
getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
@ -96,12 +103,13 @@ class SearchJob extends JobImpl {
}
public void runJob() {
_startedOn = getContext().clock().now();
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Searching for " + _state.getTarget()); // , getAddedBy());
getContext().statManager().addRateData("netDb.searchCount", 1, 0);
searchNext();
}
protected SearchState getState() { return _state; }
protected KademliaNetworkDatabaseFacade getFacade() { return _facade; }
protected long getExpiration() { return _expiration; }
@ -276,15 +284,15 @@ class SearchJob extends JobImpl {
*
*/
protected void sendLeaseSearch(RouterInfo router) {
TunnelId inTunnelId = getInboundTunnelId();
if (inTunnelId == null) {
TunnelInfo inTunnel = getInboundTunnelId();
if (inTunnel == null) {
_log.error("No tunnels to get search replies through! wtf!");
getContext().jobQueue().addJob(new FailedJob(getContext(), router));
return;
}
TunnelInfo inTunnel = getContext().tunnelManager().getTunnelInfo(inTunnelId);
RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getThisHop());
TunnelId inTunnelId = inTunnel.getReceiveTunnelId(0);
RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
if (inGateway == null) {
_log.error("We can't find the gateway to our inbound tunnel?! wtf");
getContext().jobQueue().addJob(new FailedJob(getContext(), router));
@ -295,12 +303,14 @@ class SearchJob extends JobImpl {
DatabaseLookupMessage msg = buildMessage(inTunnelId, inGateway, expiration);
TunnelId outTunnelId = getOutboundTunnelId();
if (outTunnelId == null) {
TunnelInfo outTunnel = getOutboundTunnelId();
if (outTunnel == null) {
_log.error("No tunnels to send search out through! wtf!");
getContext().jobQueue().addJob(new FailedJob(getContext(), router));
return;
}
}
TunnelId outTunnelId = outTunnel.getSendTunnelId(0);
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Sending leaseSet search to " + router.getIdentity().getHash().toBase64()
@ -310,10 +320,9 @@ class SearchJob extends JobImpl {
SearchMessageSelector sel = new SearchMessageSelector(getContext(), router, _expiration, _state);
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade, this);
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId, router.getIdentity().getHash(),
null, null, reply, new FailedJob(getContext(), router), sel,
PER_PEER_TIMEOUT, SEARCH_PRIORITY);
getContext().jobQueue().addJob(j);
getContext().messageRegistry().registerPending(sel, reply, new FailedJob(getContext(), router), PER_PEER_TIMEOUT);
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnelId, router.getIdentity().getHash());
}
/** we're searching for a router, so we can just send direct */
@ -338,16 +347,8 @@ class SearchJob extends JobImpl {
*
* @return tunnel id (or null if none are found)
*/
private TunnelId getOutboundTunnelId() {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);
if (tunnelIds.size() <= 0) {
return null;
}
return (TunnelId)tunnelIds.get(0);
private TunnelInfo getOutboundTunnelId() {
return getContext().tunnelManager().selectOutboundTunnel();
}
/**
@ -355,15 +356,8 @@ class SearchJob extends JobImpl {
*
* @return tunnel id (or null if none are found)
*/
private TunnelId getInboundTunnelId() {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = getContext().tunnelManager().selectInboundTunnelIds(crit);
if (tunnelIds.size() <= 0) {
return null;
}
return (TunnelId)tunnelIds.get(0);
private TunnelInfo getInboundTunnelId() {
return getContext().tunnelManager().selectInboundTunnel();
}
/**
@ -378,7 +372,7 @@ class SearchJob extends JobImpl {
msg.setSearchKey(_state.getTarget());
msg.setFrom(replyGateway.getIdentity().getHash());
msg.setDontIncludePeers(_state.getAttempted());
msg.setMessageExpiration(new Date(expiration));
msg.setMessageExpiration(expiration);
msg.setReplyTunnel(replyTunnelId);
return msg;
}
@ -393,7 +387,7 @@ class SearchJob extends JobImpl {
msg.setSearchKey(_state.getTarget());
msg.setFrom(getContext().routerHash());
msg.setDontIncludePeers(_state.getAttempted());
msg.setMessageExpiration(new Date(expiration));
msg.setMessageExpiration(expiration);
msg.setReplyTunnel(null);
return msg;
}
@ -583,6 +577,8 @@ class SearchJob extends JobImpl {
_facade.searchComplete(_state.getTarget());
handleDeferred(true);
resend();
}
@ -605,6 +601,13 @@ class SearchJob extends JobImpl {
* Search totally failed
*/
protected void fail() {
if (isLocal()) {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": why did we fail if the target is local?: " + _state.getTarget().toBase64(), new Exception("failure cause"));
succeed();
return;
}
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Failed search for key " + _state.getTarget());
if (_log.shouldLog(Log.DEBUG))
@ -613,13 +616,81 @@ class SearchJob extends JobImpl {
if (_keepStats) {
long time = getContext().clock().now() - _state.getWhenStarted();
getContext().statManager().addRateData("netDb.failedTime", time, 0);
_facade.fail(_state.getTarget());
//_facade.fail(_state.getTarget());
}
if (_onFailure != null)
getContext().jobQueue().addJob(_onFailure);
_facade.searchComplete(_state.getTarget());
handleDeferred(false);
}
public void addDeferred(Job onFind, Job onFail, long expiration, boolean isLease) {
Search search = new Search(onFind, onFail, expiration, isLease);
boolean ok = true;
synchronized (_deferredSearches) {
if (_deferredCleared)
ok = false;
else
_deferredSearches.add(search);
}
if (!ok) {
// race between adding deferred and search completing
if (_log.shouldLog(Log.WARN))
_log.warn("Race deferred before searchCompleting? our onFind=" + _onSuccess + " new one: " + onFind);
// the following /shouldn't/ be necessary, but it doesnt hurt
_facade.searchComplete(_state.getTarget());
_facade.search(_state.getTarget(), onFind, onFail, expiration - getContext().clock().now(), isLease);
}
}
private void handleDeferred(boolean success) {
List deferred = null;
synchronized (_deferredSearches) {
if (_deferredSearches.size() > 0) {
deferred = new ArrayList(_deferredSearches);
_deferredSearches.clear();
}
_deferredCleared = true;
}
if (deferred != null) {
long now = getContext().clock().now();
for (int i = 0; i < deferred.size(); i++) {
Search cur = (Search)deferred.get(i);
if (cur.getExpiration() < now)
getContext().jobQueue().addJob(cur.getOnFail());
else if (success)
getContext().jobQueue().addJob(cur.getOnFind());
else // failed search, not yet expired, but it took too long to reasonably continue
getContext().jobQueue().addJob(cur.getOnFail());
}
}
}
private class Search {
private Job _onFind;
private Job _onFail;
private long _expiration;
private boolean _isLease;
public Search(Job onFind, Job onFail, long expiration, boolean isLease) {
_onFind = onFind;
_onFail = onFail;
_expiration = expiration;
_isLease = isLease;
}
public Job getOnFind() { return _onFind; }
public Job getOnFail() { return _onFail; }
public long getExpiration() { return _expiration; }
public boolean getIsLease() { return _isLease; }
}
public String getName() { return "Kademlia NetDb Search"; }
public String toString() {
return super.toString() + " started "
+ DataHelper.formatDuration((getContext().clock().now() - _startedOn)) + " ago";
}
}

View File

@ -42,14 +42,22 @@ class SearchMessageSelector implements MessageSelector {
}
public boolean continueMatching() {
boolean expired = _context.clock().now() > _exp;
if (expired) return false;
// so we dont drop outstanding replies after receiving the value
// > 1 to account for the 'current' match
if (_state.getPending().size() > 1)
return true;
if (_found) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] Dont continue matching! looking for a reply from "
+ _peer + " with regards to " + _state.getTarget());
return false;
} else {
return true;
}
long now = _context.clock().now();
return now < _exp;
}
public long getExpiration() { return _exp; }
public boolean isMatch(I2NPMessage message) {

View File

@ -25,8 +25,6 @@ import net.i2p.router.JobImpl;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.util.Log;
class StoreJob extends JobImpl {
@ -54,7 +52,7 @@ class StoreJob extends JobImpl {
private final static int STORE_PRIORITY = 100;
/** how long we allow for an ACK to take after a store */
private final static long STORE_TIMEOUT_MS = 10*1000;
private final static int STORE_TIMEOUT_MS = 10*1000;
/**
* Create a new search for the routingKey specified
@ -189,7 +187,7 @@ class StoreJob extends JobImpl {
msg.setLeaseSet((LeaseSet)_state.getData());
else
throw new IllegalArgumentException("Storing an unknown data type! " + _state.getData());
msg.setMessageExpiration(new Date(getContext().clock().now() + _timeoutMs));
msg.setMessageExpiration(getContext().clock().now() + _timeoutMs);
if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) {
// don't send it to ourselves
@ -212,19 +210,19 @@ class StoreJob extends JobImpl {
private void sendStoreThroughGarlic(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
long token = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
TunnelId replyTunnelId = selectInboundTunnel();
if (replyTunnelId == null) {
TunnelInfo replyTunnel = selectInboundTunnel();
if (replyTunnel == null) {
_log.error("No reply inbound tunnels available!");
return;
}
TunnelInfo replyTunnel = getContext().tunnelManager().getTunnelInfo(replyTunnelId);
TunnelId replyTunnelId = replyTunnel.getReceiveTunnelId(0);
if (replyTunnel == null) {
_log.error("No reply inbound tunnels available!");
return;
}
msg.setReplyToken(token);
msg.setReplyTunnel(replyTunnelId);
msg.setReplyGateway(replyTunnel.getThisHop());
msg.setReplyGateway(replyTunnel.getPeer(0));
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": send(dbStore) w/ token expected " + token);
@ -235,19 +233,18 @@ class StoreJob extends JobImpl {
FailedJob onFail = new FailedJob(getContext(), peer);
StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration);
TunnelId outTunnelId = selectOutboundTunnel();
if (outTunnelId != null) {
TunnelInfo outTunnel = selectOutboundTunnel();
if (outTunnel != null) {
//if (_log.shouldLog(Log.DEBUG))
// _log.debug(getJobId() + ": Sending tunnel message out " + outTunnelId + " to "
// + peer.getIdentity().getHash().toBase64());
TunnelId targetTunnelId = null; // not needed
Job onSend = null; // not wanted
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId,
peer.getIdentity().getHash(),
targetTunnelId, onSend, onReply,
onFail, selector, STORE_TIMEOUT_MS,
STORE_PRIORITY);
getContext().jobQueue().addJob(j);
if (_log.shouldLog(Log.DEBUG))
_log.debug("sending store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + msg);
getContext().messageRegistry().registerPending(selector, onReply, onFail, STORE_TIMEOUT_MS);
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0), null, peer.getIdentity().getHash());
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("No outbound tunnels to send a dbStore out!");
@ -255,36 +252,12 @@ class StoreJob extends JobImpl {
}
}
private TunnelId selectOutboundTunnel() {
TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
criteria.setAnonymityPriority(80);
criteria.setLatencyPriority(50);
criteria.setReliabilityPriority(20);
criteria.setMaximumTunnelsRequired(1);
criteria.setMinimumTunnelsRequired(1);
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(criteria);
if (tunnelIds.size() <= 0) {
_log.error("No outbound tunnels?!");
return null;
} else {
return (TunnelId)tunnelIds.get(0);
}
private TunnelInfo selectOutboundTunnel() {
return getContext().tunnelManager().selectOutboundTunnel();
}
private TunnelId selectInboundTunnel() {
TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
criteria.setAnonymityPriority(80);
criteria.setLatencyPriority(50);
criteria.setReliabilityPriority(20);
criteria.setMaximumTunnelsRequired(1);
criteria.setMinimumTunnelsRequired(1);
List tunnelIds = getContext().tunnelManager().selectInboundTunnelIds(criteria);
if (tunnelIds.size() <= 0) {
_log.error("No inbound tunnels?!");
return null;
} else {
return (TunnelId)tunnelIds.get(0);
}
private TunnelInfo selectInboundTunnel() {
return getContext().tunnelManager().selectInboundTunnel();
}
/**
@ -335,7 +308,8 @@ class StoreJob extends JobImpl {
}
public void runJob() {
if (_log.shouldLog(Log.WARN))
_log.warn(StoreJob.this.getJobId() + ": Peer " + _peer.getIdentity().getHash().toBase64() + " timed out");
_log.warn(StoreJob.this.getJobId() + ": Peer " + _peer.getIdentity().getHash().toBase64()
+ " timed out sending " + _state.getTarget());
_state.replyTimeout(_peer.getIdentity().getHash());
getContext().profileManager().dbStoreFailed(_peer.getIdentity().getHash());
@ -362,8 +336,8 @@ class StoreJob extends JobImpl {
* Send totally failed
*/
private void fail() {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Failed sending key " + _state.getTarget());
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": Failed sending key " + _state.getTarget());
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": State of failed send: " + _state, new Exception("Who failed me?"));
if (_onFailure != null)

View File

@ -67,7 +67,8 @@ class TransientDataStore implements DataStore {
public void put(Hash key, DataStructure data) {
if (data == null) return;
_log.debug("Storing key " + key);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Storing key " + key);
Object old = null;
synchronized (_data) {
old = _data.put(key, data);

View File

@ -66,6 +66,17 @@ public class CapacityCalculator extends Calculator {
if (tooOld(profile))
capacity = 1;
// now take into account non-rejection tunnel rejections (which haven't
// incremented the rejection counter, since they were only temporary)
long now = _context.clock().now();
if (profile.getTunnelHistory().getLastRejectedTransient() > now - 5*60*1000)
capacity = 1;
else if (profile.getTunnelHistory().getLastRejectedProbabalistic() > now - 5*60*1000)
capacity -= _context.random().nextInt(5);
if (capacity < 0)
capacity = 0;
capacity += profile.getReliabilityBonus();
return capacity;
}

View File

@ -71,6 +71,11 @@ public class IsFailingCalculator extends Calculator {
// return true;
//}
// if they have rejected us saying they're totally broken anytime in the last
// 10 minutes, dont bother 'em
if (profile.getTunnelHistory().getLastRejectedCritical() > _context.clock().now() - 10*60*1000)
return true;
return false;
}
}

View File

@ -57,7 +57,8 @@ class PeerManager {
if (peer == null) return;
PeerProfile prof = _organizer.getProfile(peer);
if (prof == null) return;
_persistenceHelper.writeProfile(prof);
if (true)
_persistenceHelper.writeProfile(prof);
}
void loadProfiles() {
Set profiles = _persistenceHelper.readProfiles();
@ -83,7 +84,7 @@ class PeerManager {
case PeerSelectionCriteria.PURPOSE_TEST:
// for now, the peers we test will be the reliable ones
//_organizer.selectWellIntegratedPeers(criteria.getMinimumRequired(), exclude, curVals);
_organizer.selectHighCapacityPeers(criteria.getMinimumRequired(), exclude, peers);
_organizer.selectNotFailingPeers(criteria.getMinimumRequired(), exclude, peers);
break;
case PeerSelectionCriteria.PURPOSE_TUNNEL:
// pull all of the fast ones, regardless of how many we

View File

@ -18,8 +18,6 @@ import net.i2p.router.PeerSelectionCriteria;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.util.Log;
/**
@ -46,7 +44,7 @@ public class PeerTestJob extends JobImpl {
/** how long should we wait before firing off new tests? */
private long getPeerTestDelay() { return DEFAULT_PEER_TEST_DELAY; }
/** how long to give each peer before marking them as unresponsive? */
private long getTestTimeout() { return 30*1000; }
private int getTestTimeout() { return 30*1000; }
/** number of peers to test each round */
private int getTestConcurrency() { return 2; }
@ -113,45 +111,44 @@ public class PeerTestJob extends JobImpl {
*
*/
private void testPeer(RouterInfo peer) {
TunnelId inTunnelId = getInboundTunnelId();
if (inTunnelId == null) {
TunnelInfo inTunnel = getInboundTunnelId();
if (inTunnel == null) {
_log.error("No tunnels to get peer test replies through! wtf!");
return;
}
TunnelId inTunnelId = inTunnel.getReceiveTunnelId(0);
TunnelInfo inTunnel = getContext().tunnelManager().getTunnelInfo(inTunnelId);
RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getThisHop());
RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
if (inGateway == null) {
_log.error("We can't find the gateway to our inbound tunnel?! wtf");
if (_log.shouldLog(Log.WARN))
_log.warn("We can't find the gateway to our inbound tunnel?! wtf");
return;
}
long timeoutMs = getTestTimeout();
int timeoutMs = getTestTimeout();
long expiration = getContext().clock().now() + timeoutMs;
long nonce = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
DatabaseStoreMessage msg = buildMessage(peer, inTunnelId, inGateway.getIdentity().getHash(), nonce, expiration);
TunnelId outTunnelId = getOutboundTunnelId();
if (outTunnelId == null) {
TunnelInfo outTunnel = getOutboundTunnelId();
if (outTunnel == null) {
_log.error("No tunnels to send search out through! wtf!");
return;
}
TunnelInfo outTunnel = getContext().tunnelManager().getTunnelInfo(outTunnelId);
TunnelId outTunnelId = outTunnel.getSendTunnelId(0);
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Sending peer test to " + peer.getIdentity().getHash().toBase64()
+ "w/ replies through [" + inGateway.getIdentity().getHash().toBase64()
+ "] via tunnel [" + msg.getReplyTunnel() + "]");
+ " out " + outTunnel + " w/ replies through " + inTunnel);
ReplySelector sel = new ReplySelector(peer.getIdentity().getHash(), nonce, expiration);
PeerReplyFoundJob reply = new PeerReplyFoundJob(getContext(), peer, inTunnel, outTunnel);
PeerReplyTimeoutJob timeoutJob = new PeerReplyTimeoutJob(getContext(), peer, inTunnel, outTunnel);
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId, peer.getIdentity().getHash(),
null, null, reply, timeoutJob, sel,
timeoutMs, TEST_PRIORITY);
getContext().jobQueue().addJob(j);
getContext().messageRegistry().registerPending(sel, reply, timeoutJob, timeoutMs);
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnelId, null, peer.getIdentity().getHash());
}
/**
@ -159,16 +156,8 @@ public class PeerTestJob extends JobImpl {
*
* @return tunnel id (or null if none are found)
*/
private TunnelId getOutboundTunnelId() {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);
if (tunnelIds.size() <= 0) {
return null;
}
return (TunnelId)tunnelIds.get(0);
private TunnelInfo getOutboundTunnelId() {
return getContext().tunnelManager().selectOutboundTunnel();
}
/**
@ -176,15 +165,8 @@ public class PeerTestJob extends JobImpl {
*
* @return tunnel id (or null if none are found)
*/
private TunnelId getInboundTunnelId() {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = getContext().tunnelManager().selectInboundTunnelIds(crit);
if (tunnelIds.size() <= 0) {
return null;
}
return (TunnelId)tunnelIds.get(0);
private TunnelInfo getInboundTunnelId() {
return getContext().tunnelManager().selectInboundTunnel();
}
/**
@ -197,7 +179,7 @@ public class PeerTestJob extends JobImpl {
msg.setReplyGateway(replyGateway);
msg.setReplyTunnel(replyTunnel);
msg.setReplyToken(nonce);
msg.setMessageExpiration(new Date(expiration));
msg.setMessageExpiration(expiration);
return msg;
}
@ -229,11 +211,14 @@ public class PeerTestJob extends JobImpl {
}
return false;
}
public String toString() {
StringBuffer buf = new StringBuffer(64);
buf.append("Test peer ").append(_peer.toBase64().substring(0,4));
buf.append(" with nonce ").append(_nonce);
return buf.toString();
}
}
private boolean getShouldFailTunnels() { return true; }
/**
* Called when the peer's response is found
*/
@ -256,30 +241,12 @@ public class PeerTestJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("successful peer test after " + responseTime + " for "
+ _peer.getIdentity().getHash().toBase64() + " using outbound tunnel "
+ _sendTunnel.getTunnelId().getTunnelId() + " and inbound tunnel "
+ _replyTunnel.getTunnelId().getTunnelId());
+ _sendTunnel + " and inbound tunnel "
+ _replyTunnel);
getContext().profileManager().dbLookupSuccessful(_peer.getIdentity().getHash(), responseTime);
// only honor success if we also honor failure
if (getShouldFailTunnels()) {
_sendTunnel.setLastTested(getContext().clock().now());
_replyTunnel.setLastTested(getContext().clock().now());
TunnelInfo cur = _replyTunnel;
while (cur != null) {
Hash peer = cur.getThisHop();
if ( (peer != null) && (!getContext().routerHash().equals(peer)) )
getContext().profileManager().tunnelTestSucceeded(peer, responseTime);
cur = cur.getNextHopInfo();
}
cur = _sendTunnel;
while (cur != null) {
Hash peer = cur.getThisHop();
if ( (peer != null) && (!getContext().routerHash().equals(peer)) )
getContext().profileManager().tunnelTestSucceeded(peer, responseTime);
cur = cur.getNextHopInfo();
}
}
// we know the tunnels are working
_sendTunnel.testSuccessful((int)responseTime);
_replyTunnel.testSuccessful((int)responseTime);
}
public void setMessage(I2NPMessage message) {
@ -309,28 +276,11 @@ public class PeerTestJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("failed peer test for "
+ _peer.getIdentity().getHash().toBase64() + " using outbound tunnel "
+ _sendTunnel.getTunnelId().getTunnelId() + " and inbound tunnel "
+ _replyTunnel.getTunnelId().getTunnelId());
if (getShouldFailTunnels()) {
_sendTunnel.setLastTested(getContext().clock().now());
_replyTunnel.setLastTested(getContext().clock().now());
TunnelInfo cur = _replyTunnel;
while (cur != null) {
Hash peer = cur.getThisHop();
if ( (peer != null) && (!getContext().routerHash().equals(peer)) )
getContext().profileManager().tunnelFailed(peer);
cur = cur.getNextHopInfo();
}
cur = _sendTunnel;
while (cur != null) {
Hash peer = cur.getThisHop();
if ( (peer != null) && (!getContext().routerHash().equals(peer)) )
getContext().profileManager().tunnelFailed(peer);
cur = cur.getNextHopInfo();
}
}
+ _sendTunnel + " and inbound tunnel "
+ _replyTunnel);
// don't fail the tunnels, as the peer might just plain be down, or
// otherwise overloaded
}
}
}

View File

@ -93,15 +93,14 @@ public class ProfileManagerImpl implements ProfileManager {
/**
* Note that a router explicitly rejected joining a tunnel.
*
* @param explicit true if the tunnel request was explicitly rejected, false
* if we just didn't get a reply back in time.
* @param severity how much the peer doesnt want to participate in the
* tunnel (large == more severe)
*/
public void tunnelRejected(Hash peer, long responseTimeMs, boolean explicit) {
public void tunnelRejected(Hash peer, long responseTimeMs, int severity) {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastHeardFrom(_context.clock().now());
if (explicit)
data.getTunnelHistory().incrementRejected();
data.getTunnelHistory().incrementRejected(severity);
}
/**

View File

@ -13,6 +13,8 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.Properties;
import java.util.Set;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
@ -50,7 +52,7 @@ class ProfilePersistenceHelper {
long before = _context.clock().now();
OutputStream fos = null;
try {
fos = new BufferedOutputStream(new FileOutputStream(f));
fos = new BufferedOutputStream(new GZIPOutputStream(new FileOutputStream(f)));
writeProfile(profile, fos);
} catch (IOException ioe) {
_log.error("Error writing profile to " + f);
@ -215,9 +217,24 @@ class ProfilePersistenceHelper {
private void loadProps(Properties props, File file) {
try {
DataHelper.loadProps(props, file);
FileInputStream fin = new FileInputStream(file);
int c = fin.read();
fin.close();
fin = new FileInputStream(file); // ghetto mark+reset
if (c == '#') {
// uncompressed
if (_log.shouldLog(Log.INFO))
_log.info("Loading uncompressed profile data from " + file.getName());
DataHelper.loadProps(props, fin);
} else {
// compressed (or corrupt...)
if (_log.shouldLog(Log.INFO))
_log.info("Loading compressed profile data from " + file.getName());
DataHelper.loadProps(props, new GZIPInputStream(fin));
}
} catch (IOException ioe) {
_log.warn("Error loading properties from " + file.getName(), ioe);
if (_log.shouldLog(Log.WARN))
_log.warn("Error loading properties from " + file.getName(), ioe);
}
}

View File

@ -69,7 +69,7 @@ public class ReliabilityCalculator extends Calculator {
long now = _context.clock().now();
long timeSinceRejection = now - profile.getTunnelHistory().getLastRejected();
long timeSinceRejection = 61*60*1000; // now - profile.getTunnelHistory().getLastRejected();
if (timeSinceRejection > 60*60*1000) {
// noop. rejection was over 60 minutes ago
} else if (timeSinceRejection > 10*60*1000) {

View File

@ -18,23 +18,29 @@ public class TunnelHistory {
private volatile long _lifetimeAgreedTo;
private volatile long _lifetimeRejected;
private volatile long _lastAgreedTo;
private volatile long _lastRejected;
private volatile long _lastRejectedCritical;
private volatile long _lastRejectedBandwidth;
private volatile long _lastRejectedTransient;
private volatile long _lastRejectedProbabalistic;
private volatile long _lifetimeFailed;
private volatile long _lastFailed;
private RateStat _rejectRate;
private RateStat _failRate;
private String _statGroup;
/** probabalistic tunnel rejection due to a flood of requests */
public static final int TUNNEL_REJECT_PROBABALISTIC_REJECT = 10;
/** tunnel rejection due to temporary cpu/job/tunnel overload */
public static final int TUNNEL_REJECT_TRANSIENT_OVERLOAD = 20;
/** tunnel rejection due to excess bandwidth usage */
public static final int TUNNEL_REJECT_BANDWIDTH = 30;
/** tunnel rejection due to system failure */
public static final int TUNNEL_REJECT_CRIT = 50;
public TunnelHistory(RouterContext context, String statGroup) {
_context = context;
_log = context.logManager().getLog(TunnelHistory.class);
_statGroup = statGroup;
_lifetimeAgreedTo = 0;
_lifetimeFailed = 0;
_lifetimeRejected = 0;
_lastAgreedTo = 0;
_lastFailed = 0;
_lastRejected = 0;
createRates(statGroup);
}
@ -53,8 +59,14 @@ public class TunnelHistory {
public long getLifetimeFailed() { return _lifetimeFailed; }
/** when the peer last agreed to participate in a tunnel */
public long getLastAgreedTo() { return _lastAgreedTo; }
/** when the peer last refused to participate in a tunnel */
public long getLastRejected() { return _lastRejected; }
/** when the peer last refused to participate in a tunnel with level of critical */
public long getLastRejectedCritical() { return _lastRejectedCritical; }
/** when the peer last refused to participate in a tunnel complaining of bandwidth overload */
public long getLastRejectedBandwidth() { return _lastRejectedBandwidth; }
/** when the peer last refused to participate in a tunnel complaining of transient overload */
public long getLastRejectedTransient() { return _lastRejectedTransient; }
/** when the peer last refused to participate in a tunnel probabalistically */
public long getLastRejectedProbabalistic() { return _lastRejectedProbabalistic; }
/** when the last tunnel the peer participated in failed */
public long getLastFailed() { return _lastFailed; }
@ -62,10 +74,26 @@ public class TunnelHistory {
_lifetimeAgreedTo++;
_lastAgreedTo = _context.clock().now();
}
public void incrementRejected() {
/**
* @param severity how much the peer doesnt want to participate in the
* tunnel (large == more severe)
*/
public void incrementRejected(int severity) {
_lifetimeRejected++;
_rejectRate.addData(1, 1);
_lastRejected = _context.clock().now();
if (severity >= TUNNEL_REJECT_CRIT) {
_lastRejectedCritical = _context.clock().now();
_rejectRate.addData(1, 1);
} else if (severity >= TUNNEL_REJECT_BANDWIDTH) {
_lastRejectedBandwidth = _context.clock().now();
_rejectRate.addData(1, 1);
} else if (severity >= TUNNEL_REJECT_TRANSIENT_OVERLOAD) {
_lastRejectedTransient = _context.clock().now();
// dont increment the reject rate in this case
} else if (severity >= TUNNEL_REJECT_PROBABALISTIC_REJECT) {
_lastRejectedProbabalistic = _context.clock().now();
// dont increment the reject rate in this case
}
}
public void incrementFailed() {
_lifetimeFailed++;
@ -77,7 +105,10 @@ public class TunnelHistory {
public void setLifetimeRejected(long num) { _lifetimeRejected = num; }
public void setLifetimeFailed(long num) { _lifetimeFailed = num; }
public void setLastAgreedTo(long when) { _lastAgreedTo = when; }
public void setLastRejected(long when) { _lastRejected = when; }
public void setLastRejectedCritical(long when) { _lastRejectedCritical = when; }
public void setLastRejectedBandwidth(long when) { _lastRejectedBandwidth = when; }
public void setLastRejectedTransient(long when) { _lastRejectedTransient = when; }
public void setLastRejectedProbabalistic(long when) { _lastRejectedProbabalistic = when; }
public void setLastFailed(long when) { _lastFailed = when; }
public RateStat getRejectionRate() { return _rejectRate; }
@ -100,7 +131,10 @@ public class TunnelHistory {
buf.append("###").append(NL);
add(buf, "lastAgreedTo", _lastAgreedTo, "When did the peer last agree to participate in a tunnel? (milliseconds since the epoch)");
add(buf, "lastFailed", _lastFailed, "When was the last time a tunnel that the peer agreed to participate failed? (milliseconds since the epoch)");
add(buf, "lastRejected", _lastRejected, "When was the last time the peer refused to participate in a tunnel? (milliseconds since the epoch)");
add(buf, "lastRejectedCritical", _lastRejectedCritical, "When was the last time the peer refused to participate in a tunnel? (milliseconds since the epoch)");
add(buf, "lastRejectedBandwidth", _lastRejectedBandwidth, "When was the last time the peer refused to participate in a tunnel? (milliseconds since the epoch)");
add(buf, "lastRejectedTransient", _lastRejectedTransient, "When was the last time the peer refused to participate in a tunnel? (milliseconds since the epoch)");
add(buf, "lastRejectedProbabalistic", _lastRejectedProbabalistic, "When was the last time the peer refused to participate in a tunnel? (milliseconds since the epoch)");
add(buf, "lifetimeAgreedTo", _lifetimeAgreedTo, "How many tunnels has the peer ever agreed to participate in?");
add(buf, "lifetimeFailed", _lifetimeFailed, "How many tunnels has the peer ever agreed to participate in that failed prematurely?");
add(buf, "lifetimeRejected", _lifetimeRejected, "How many tunnels has the peer ever refused to participate in?");
@ -117,7 +151,10 @@ public class TunnelHistory {
public void load(Properties props) {
_lastAgreedTo = getLong(props, "tunnels.lastAgreedTo");
_lastFailed = getLong(props, "tunnels.lastFailed");
_lastRejected = getLong(props, "tunnels.lastRejected");
_lastRejectedCritical = getLong(props, "tunnels.lastRejectedCritical");
_lastRejectedBandwidth = getLong(props, "tunnels.lastRejectedBandwidth");
_lastRejectedTransient = getLong(props, "tunnels.lastRejectedTransient");
_lastRejectedProbabalistic = getLong(props, "tunnels.lastRejectedProbabalistic");
_lifetimeAgreedTo = getLong(props, "tunnels.lifetimeAgreedTo");
_lifetimeFailed = getLong(props, "tunnels.lifetimeFailed");
_lifetimeRejected = getLong(props, "tunnels.lifetimeRejected");

View File

@ -11,6 +11,7 @@ package net.i2p.router.startup;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.HashSet;
import java.util.Properties;
import net.i2p.data.Certificate;
import net.i2p.data.DataFormatException;
@ -50,7 +51,9 @@ public class CreateRouterInfoJob extends JobImpl {
FileOutputStream fos2 = null;
try {
info.setAddresses(getContext().commSystem().createAddresses());
info.setOptions(getContext().statPublisher().publishStatistics());
Properties stats = getContext().statPublisher().publishStatistics();
stats.setProperty(RouterInfo.PROP_NETWORK_ID, Router.NETWORK_ID+"");
info.setOptions(stats);
info.setPeers(new HashSet());
info.setPublished(getCurrentPublishDate(getContext()));
RouterIdentity ident = new RouterIdentity();

View File

@ -75,7 +75,7 @@ class LoadClientAppsJob extends JobImpl {
// fall back to use router.config's clientApp.* lines
if (!cfgFile.exists())
return new Properties(getContext().router().getConfigMap());
return getContext().router().getConfigMap();
try {
DataHelper.loadProps(rv, cfgFile);

View File

@ -12,6 +12,7 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Properties;
import net.i2p.data.Certificate;
import net.i2p.data.DataFormatException;
@ -122,7 +123,9 @@ public class RebuildRouterInfoJob extends JobImpl {
try {
info.setAddresses(getContext().commSystem().createAddresses());
info.setOptions(getContext().statPublisher().publishStatistics());
Properties stats = getContext().statPublisher().publishStatistics();
stats.setProperty(RouterInfo.PROP_NETWORK_ID, ""+Router.NETWORK_ID);
info.setOptions(stats);
// info.setPeers(new HashSet()); // this would have the trusted peers
info.setPublished(CreateRouterInfoJob.getCurrentPublishDate(getContext()));

View File

@ -56,10 +56,14 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
public List getBids(OutNetMessage msg) {
return _manager.getBids(msg);
}
public TransportBid getBid(OutNetMessage msg) {
return _manager.getBid(msg);
}
public void processMessage(OutNetMessage msg) {
GetBidsJob j = new GetBidsJob(_context, this, msg);
j.runJob();
//GetBidsJob j = new GetBidsJob(_context, this, msg);
//j.runJob();
GetBidsJob.getBids(_context, this, msg);
}
public List getMostRecentErrorMessages() {

View File

@ -75,6 +75,11 @@ public class FIFOBandwidthLimiter {
*
*/
public Request requestInbound(int bytesIn, String purpose) {
if (_inboundUnlimited) {
_totalAllocatedInboundBytes += bytesIn;
return _noop;
}
SimpleRequest req = new SimpleRequest(bytesIn, 0, purpose);
int pending = 0;
synchronized (_pendingInboundRequests) {
@ -91,6 +96,11 @@ public class FIFOBandwidthLimiter {
*
*/
public Request requestOutbound(int bytesOut, String purpose) {
if (_outboundUnlimited) {
_totalAllocatedOutboundBytes += bytesOut;
return _noop;
}
SimpleRequest req = new SimpleRequest(0, bytesOut, purpose);
int pending = 0;
synchronized (_pendingOutboundRequests) {
@ -517,4 +527,17 @@ public class FIFOBandwidthLimiter {
public boolean getAborted();
}
private static final NoopRequest _noop = new NoopRequest();
private static class NoopRequest implements Request {
public void abort() {}
public boolean getAborted() { return false; }
public int getPendingInboundRequested() { return 0; }
public int getPendingOutboundRequested() { return 0; }
public String getRequestName() { return "noop"; }
public long getRequestTime() { return 0; }
public int getTotalInboundRequested() { return 0; }
public int getTotalOutboundRequested() { return 0; }
public void waitForNextAllocation() {}
}
}

View File

@ -37,49 +37,59 @@ public class GetBidsJob extends JobImpl {
public String getName() { return "Fetch bids for a message to be delivered"; }
public void runJob() {
Hash to = _msg.getTarget().getIdentity().getHash();
getBids(getContext(), _facade, _msg);
}
static void getBids(RouterContext context, CommSystemFacadeImpl facade, OutNetMessage msg) {
Log log = context.logManager().getLog(GetBidsJob.class);
Hash to = msg.getTarget().getIdentity().getHash();
if (getContext().shitlist().isShitlisted(to)) {
_log.warn("Attempt to send a message to a shitlisted peer - " + to);
getContext().messageRegistry().peerFailed(to);
fail();
if (context.shitlist().isShitlisted(to)) {
if (log.shouldLog(Log.WARN))
log.warn("Attempt to send a message to a shitlisted peer - " + to);
context.messageRegistry().peerFailed(to);
fail(context, msg);
return;
}
Hash us = getContext().routerHash();
Hash us = context.routerHash();
if (to.equals(us)) {
_log.error("wtf, send a message to ourselves? nuh uh. msg = " + _msg, getAddedBy());
fail();
if (log.shouldLog(Log.ERROR))
log.error("wtf, send a message to ourselves? nuh uh. msg = " + msg);
fail(context, msg);
return;
}
List bids = _facade.getBids(_msg);
if (bids.size() <= 0) {
_log.warn("No bids available for the message " + _msg);
getContext().shitlist().shitlistRouter(to, "No bids");
getContext().netDb().fail(to);
fail();
TransportBid bid = facade.getBid(msg);
if (bid == null) {
// only shitlist them if we couldnt even try a single transport
if (msg.getFailedTransports().size() <= 0) {
if (log.shouldLog(Log.WARN))
log.warn("No bids available for the message " + msg);
context.shitlist().shitlistRouter(to, "No bids");
context.netDb().fail(to);
}
fail(context, msg);
} else {
TransportBid bid = (TransportBid)bids.get(0);
bid.getTransport().send(_msg);
bid.getTransport().send(msg);
}
}
private void fail() {
if (_msg.getOnFailedSendJob() != null) {
getContext().jobQueue().addJob(_msg.getOnFailedSendJob());
private static void fail(RouterContext context, OutNetMessage msg) {
if (msg.getOnFailedSendJob() != null) {
context.jobQueue().addJob(msg.getOnFailedSendJob());
}
if (_msg.getOnFailedReplyJob() != null) {
getContext().jobQueue().addJob(_msg.getOnFailedReplyJob());
if (msg.getOnFailedReplyJob() != null) {
context.jobQueue().addJob(msg.getOnFailedReplyJob());
}
MessageSelector selector = _msg.getReplySelector();
MessageSelector selector = msg.getReplySelector();
if (selector != null) {
getContext().messageRegistry().unregisterPending(_msg);
context.messageRegistry().unregisterPending(msg);
}
getContext().profileManager().messageFailed(_msg.getTarget().getIdentity().getHash());
context.profileManager().messageFailed(msg.getTarget().getIdentity().getHash());
_msg.discardData();
msg.discardData();
}
}

View File

@ -23,8 +23,10 @@ import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.MessageSelector;
import net.i2p.router.OutNetMessage;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer;
public class OutboundMessageRegistry {
private Log _log;
@ -38,7 +40,7 @@ public class OutboundMessageRegistry {
_context = context;
_log = _context.logManager().getLog(OutboundMessageRegistry.class);
_pendingMessages = new TreeMap();
_context.jobQueue().addJob(new CleanupPendingMessagesJob());
//_context.jobQueue().addJob(new CleanupPendingMessagesJob());
}
public void shutdown() {
@ -68,73 +70,77 @@ public class OutboundMessageRegistry {
long beforeSync = _context.clock().now();
Map messages = null;
synchronized (_pendingMessages) {
messages = (Map)_pendingMessages.clone();
}
long matchTime = 0;
long continueTime = 0;
int numMessages = messages.size();
int numMessages = 0;
long afterSync1 = 0;
long afterSearch = 0;
int matchedRemoveCount = 0;
StringBuffer slow = null; // new StringBuffer(256);
long afterSync1 = _context.clock().now();
ArrayList matchedRemove = null; // new ArrayList(32);
for (Iterator iter = messages.keySet().iterator(); iter.hasNext(); ) {
Long exp = (Long)iter.next();
OutNetMessage msg = (OutNetMessage)messages.get(exp);
MessageSelector selector = msg.getReplySelector();
if (selector != null) {
long before = _context.clock().now();
boolean isMatch = selector.isMatch(message);
long after = _context.clock().now();
long diff = after-before;
if (diff > 100) {
if (_log.shouldLog(Log.WARN))
_log.warn("Matching with selector took too long (" + diff + "ms) : "
+ selector.getClass().getName());
if (slow == null) slow = new StringBuffer(256);
slow.append(selector.getClass().getName()).append(": ");
slow.append(diff).append(" ");
}
matchTime += diff;
synchronized (_pendingMessages) {
messages = _pendingMessages; //(Map)_pendingMessages.clone();
numMessages = messages.size();
afterSync1 = _context.clock().now();
if (isMatch) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Selector matches [" + selector);
if (!matches.contains(msg))
matches.add(msg);
long beforeCon = _context.clock().now();
boolean continueMatching = selector.continueMatching();
long afterCon = _context.clock().now();
long diffCon = afterCon - beforeCon;
if (diffCon > 100) {
for (Iterator iter = messages.keySet().iterator(); iter.hasNext(); ) {
Long exp = (Long)iter.next();
OutNetMessage msg = (OutNetMessage)messages.get(exp);
MessageSelector selector = msg.getReplySelector();
if (selector != null) {
long before = _context.clock().now();
boolean isMatch = selector.isMatch(message);
long after = _context.clock().now();
long diff = after-before;
if (diff > 100) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error continueMatching on a match took too long ("
+ diffCon + "ms) : " + selector.getClass().getName());
_log.warn("Matching with selector took too long (" + diff + "ms) : "
+ selector.getClass().getName());
if (slow == null) slow = new StringBuffer(256);
slow.append(selector.getClass().getName()).append(": ");
slow.append(diff).append(" ");
}
continueTime += diffCon;
matchTime += diff;
if (continueMatching) {
if (isMatch) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Continue matching");
// noop
_log.debug("Selector matches [" + selector);
if (!matches.contains(msg))
matches.add(msg);
long beforeCon = _context.clock().now();
boolean continueMatching = selector.continueMatching();
long afterCon = _context.clock().now();
long diffCon = afterCon - beforeCon;
if (diffCon > 100) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error continueMatching on a match took too long ("
+ diffCon + "ms) : " + selector.getClass().getName());
}
continueTime += diffCon;
if (continueMatching) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Continue matching");
// noop
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Stop matching selector " + selector + " for message "
+ msg.getMessageType());
// i give in mihi, i'll use iter.remove just this once ;)
// (TreeMap supports it, and this synchronized block is a hotspot)
iter.remove();
matchedRemoveCount++;
}
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Stop matching selector " + selector + " for message "
+ msg.getMessageType());
if (matchedRemove == null)
matchedRemove = new ArrayList(4);
matchedRemove.add(exp);
//_log.debug("Selector does not match [" + selector + "]");
}
} else {
//_log.debug("Selector does not match [" + selector + "]");
}
}
afterSearch = _context.clock().now();
}
long afterSearch = _context.clock().now();
doRemove(matchedRemove);
long delay = _context.clock().now() - beforeSync;
long search = afterSearch - afterSync1;
@ -149,10 +155,7 @@ public class OutboundMessageRegistry {
buf.append(search).append("ms (match: ").append(matchTime).append("ms, continue: ");
buf.append(continueTime).append("ms, #: ").append(numMessages).append(") and sync time of ");
buf.append(sync).append("ms for ");
if (matchedRemove == null)
buf.append(0);
else
buf.append(matchedRemove.size());
buf.append(matchedRemoveCount);
buf.append(" removed, ").append(matches.size()).append(" matches: slow = ");
if (slow != null)
buf.append(slow.toString());
@ -162,39 +165,27 @@ public class OutboundMessageRegistry {
return matches;
}
/**
* Remove the specified messages from the pending list
*
* @param matchedRemove expiration (Long) of the pending message to remove
*/
private void doRemove(List matchedRemove) {
if (matchedRemove != null) {
for (int i = 0; i < matchedRemove.size(); i++) {
Long expiration = (Long)matchedRemove.get(i);
OutNetMessage m = null;
long before = _context.clock().now();
synchronized (_pendingMessages) {
m = (OutNetMessage)_pendingMessages.remove(expiration);
}
long diff = _context.clock().now() - before;
if ( (diff > 500) && (_log.shouldLog(Log.WARN)) )
_log.warn("Took too long syncing on remove (" + diff + "ms");
if (m != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing message with selector "
+ m.getReplySelector().getClass().getName()
+ " :" + m.getReplySelector().toString());
}
}
}
public OutNetMessage registerPending(MessageSelector replySelector, ReplyJob onReply, Job onTimeout, int timeoutMs) {
OutNetMessage msg = new OutNetMessage(_context);
msg.setExpiration(_context.clock().now() + timeoutMs);
msg.setOnFailedReplyJob(onTimeout);
msg.setOnFailedSendJob(onTimeout);
msg.setOnReplyJob(onReply);
msg.setReplySelector(replySelector);
registerPending(msg, true);
return msg;
}
public void registerPending(OutNetMessage msg) {
if (msg == null) {
registerPending(msg, false);
}
public void registerPending(OutNetMessage msg, boolean allowEmpty) {
if (msg == null)
throw new IllegalArgumentException("Null OutNetMessage specified? wtf");
} else if (msg.getMessage() == null) {
throw new IllegalArgumentException("OutNetMessage doesn't contain an I2NPMessage? wtf");
if (!allowEmpty) {
if (msg.getMessage() == null)
throw new IllegalArgumentException("OutNetMessage doesn't contain an I2NPMessage? wtf");
}
long beforeSync = _context.clock().now();
@ -202,25 +193,29 @@ public class OutboundMessageRegistry {
long afterDone = 0;
try {
OutNetMessage oldMsg = null;
long l = msg.getExpiration();
synchronized (_pendingMessages) {
if (_pendingMessages.containsValue(msg)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Not adding an already pending message: "
+ msg.getMessage().getUniqueId() + "\n: " + msg,
_log.debug("Not adding an already pending message: " + msg,
new Exception("Duplicate message registration"));
return;
}
}
long l = msg.getExpiration();
while (_pendingMessages.containsKey(new Long(l)))
l++;
_pendingMessages.put(new Long(l), msg);
}
afterSync1 = _context.clock().now();
// this may get orphaned if the message is matched or explicitly
// removed, but its cheap enough to do an extra remove on the map
// that to poll the list periodically
SimpleTimer.getInstance().addEvent(new CleanupExpiredTask(l), l - _context.clock().now());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Register pending: " + msg.getReplySelector().getClass().getName()
+ " for " + msg.getMessage().getClass().getName() + ": "
+ " for " + msg.getMessage() + ": "
+ msg.getReplySelector().toString(), new Exception("Register pending"));
afterDone = _context.clock().now();
} finally {
@ -230,9 +225,9 @@ public class OutboundMessageRegistry {
String warn = delay + "ms (sync = " + sync1 + "ms, done = " + done + "ms)";
if ( (delay > 1000) && (_log.shouldLog(Log.WARN)) ) {
_log.error("Synchronizing in the registry.register took too long! " + warn);
_context.messageHistory().messageProcessingError(msg.getMessage().getUniqueId(),
msg.getMessage().getClass().getName(),
"RegisterPending took too long: " + warn);
//_context.messageHistory().messageProcessingError(msg.getMessage().getUniqueId(),
// msg.getMessage().getClass().getName(),
// "RegisterPending took too long: " + warn);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Synchronizing in the registry.register was quick: " + warn);
@ -329,7 +324,8 @@ public class OutboundMessageRegistry {
OutNetMessage msg = (OutNetMessage)msgs.get(exp);
buf.append("<li>").append(msg.getMessageType());
buf.append(": expiring on ").append(new Date(exp.longValue()));
buf.append(" targetting ").append(msg.getTarget().getIdentity().getHash());
if (msg.getTarget() != null)
buf.append(" targetting ").append(msg.getTarget().getIdentity().getHash());
if (msg.getReplySelector() != null)
buf.append(" with reply selector ").append(msg.getReplySelector().toString());
else
@ -340,11 +336,40 @@ public class OutboundMessageRegistry {
out.write(buf.toString());
out.flush();
}
private class CleanupExpiredTask implements SimpleTimer.TimedEvent {
private long _expiration;
public CleanupExpiredTask(long expiration) {
_expiration = expiration;
}
public void timeReached() {
OutNetMessage msg = null;
synchronized (_pendingMessages) {
msg = (OutNetMessage)_pendingMessages.remove(new Long(_expiration));
}
if (msg != null) {
_context.messageHistory().replyTimedOut(msg);
Job fail = msg.getOnFailedReplyJob();
if (fail != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing message with selector " + msg.getReplySelector()
+ ": " + msg.getMessageType()
+ " and firing fail job: " + fail.getClass().getName());
_context.jobQueue().addJob(fail);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing message with selector " + msg.getReplySelector()
+ " and not firing any job");
}
}
}
}
/**
* Cleanup any messages that were pending replies but have expired
*
*/
/*
private class CleanupPendingMessagesJob extends JobImpl {
public CleanupPendingMessagesJob() {
super(OutboundMessageRegistry.this._context);
@ -361,14 +386,14 @@ public class OutboundMessageRegistry {
OutNetMessage msg = (OutNetMessage)removed.get(i);
if (msg != null) {
ctx.messageHistory().replyTimedOut(msg);
_context.messageHistory().replyTimedOut(msg);
Job fail = msg.getOnFailedReplyJob();
if (fail != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing message with selector " + msg.getReplySelector()
+ ": " + msg.getMessageType()
+ " and firing fail job: " + fail.getClass().getName());
ctx.jobQueue().addJob(fail);
_context.jobQueue().addJob(fail);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing message with selector " + msg.getReplySelector()
@ -384,7 +409,7 @@ public class OutboundMessageRegistry {
* Remove any messages whose expirations are in the past
*
* @return list of OutNetMessage objects that have expired
*/
*/ /*
private List removeMessages() {
long now = OutboundMessageRegistry.this._context.clock().now();
List removedMessages = new ArrayList(2);
@ -416,4 +441,5 @@ public class OutboundMessageRegistry {
return removedMessages;
}
}
*/
}

View File

@ -8,8 +8,6 @@ package net.i2p.router.transport;
*
*/
import java.util.Date;
import net.i2p.data.RouterInfo;
/**
@ -22,16 +20,16 @@ public class TransportBid {
private int _bandwidthBytes;
private int _msgSize;
private RouterInfo _router;
private Date _bidExpiration;
private long _bidExpiration;
private Transport _transport;
public TransportBid() {
setLatencyMs(-1);
setBandwidthBytes(-1);
setMessageSize(-1);
setRouter(null);
setExpiration(null);
setTransport(null);
setLatencyMs(-1);
setBandwidthBytes(-1);
setMessageSize(-1);
setRouter(null);
setExpiration(0);
setTransport(null);
}
/**
@ -65,9 +63,9 @@ public class TransportBid {
/**
* Specifies how long this bid is "good for"
*/
public Date getExpiration() { return _bidExpiration; }
public void setExpiration(Date expirationDate) { _bidExpiration = expirationDate; }
public void setExpiration(long expirationDate) { setExpiration(new Date(expirationDate)); }
public long getExpiration() { return _bidExpiration; }
public void setExpiration(long expirationDate) { _bidExpiration = expirationDate; }
//public void setExpiration(long expirationDate) { setExpiration(new Date(expirationDate)); }
/**
* Specifies the transport that offered this bid

View File

@ -120,7 +120,10 @@ public abstract class TransportImpl implements Transport {
*/
protected void afterSend(OutNetMessage msg, boolean sendSuccessful, boolean allowRequeue, long msToSend) {
boolean log = false;
msg.timestamp("afterSend(" + sendSuccessful + ")");
if (sendSuccessful)
msg.timestamp("afterSend(successful)");
else
msg.timestamp("afterSend(failed)");
if (!sendSuccessful)
msg.transportFailed(getStyle());
@ -201,7 +204,7 @@ public abstract class TransportImpl implements Transport {
if (log) {
String type = msg.getMessageType();
_context.messageHistory().sendMessage(type, msg.getMessageId(),
new Date(msg.getExpiration()),
msg.getExpiration(),
msg.getTarget().getIdentity().getHash(),
sendSuccessful);
}

View File

@ -27,7 +27,6 @@ import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.InNetMessage;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.tcp.TCPTransport;
@ -120,14 +119,16 @@ public class TransportManager implements TransportEventListener {
}
public List getBids(OutNetMessage msg) {
List rv = new ArrayList(1);
rv.add(getBid(msg));
return rv;
}
public TransportBid getBid(OutNetMessage msg) {
if (msg == null)
throw new IllegalArgumentException("Null message? no bidding on a null outNetMessage!");
if (_context.router().getRouterInfo().equals(msg.getTarget()))
throw new IllegalArgumentException("WTF, bids for a message bound to ourselves?");
HashSet bids = new HashSet();
Set addrs = msg.getTarget().getAddresses();
Set failedTransports = msg.getFailedTransports();
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
@ -138,122 +139,23 @@ public class TransportManager implements TransportEventListener {
// we always want to try all transports, in case there is a faster bidirectional one
// already connected (e.g. peer only has a public PHTTP address, but they've connected
// to us via TCP, send via TCP)
if (true || isSupported(addrs, t)) {
TransportBid bid = t.bid(msg.getTarget(), msg.getMessageSize());
if (bid != null) {
bids.add(bid);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Transport " + t.getStyle() + " bid: " + bid);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Transport " + t.getStyle() + " did not produce a bid");
}
}
}
List ordered = orderBids(bids, msg);
long delay = _context.clock().now() - msg.getCreated();
if (ordered.size() > 0) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Winning bid: " + ((TransportBid)ordered.get(0)).getTransport().getStyle());
if (delay > 5*1000) {
if (_log.shouldLog(Log.INFO))
_log.info("Took too long to find this bid (" + delay + "ms)");
TransportBid bid = t.bid(msg.getTarget(), msg.getMessageSize());
if (bid != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Transport " + t.getStyle() + " bid: " + bid);
return bid;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Took a while to find this bid (" + delay + "ms)");
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("NO WINNING BIDS! peer: " + msg.getTarget());
if (delay > 5*1000) {
if (_log.shouldLog(Log.INFO))
_log.info("Took too long to fail (" + delay + "ms)");
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Took a while to fail (" + delay + "ms)");
_log.debug("Transport " + t.getStyle() + " did not produce a bid");
}
}
return ordered;
}
private List orderBids(HashSet bids, OutNetMessage msg) {
if (bids.size() <= 1)
return new ArrayList(bids);
// db messages should go as fast as possible, while the others
// should use as little bandwidth as possible.
I2NPMessage message = msg.getMessage();
if (message == null) return Collections.EMPTY_LIST;
switch (message.getType()) {
case DatabaseLookupMessage.MESSAGE_TYPE:
case DatabaseSearchReplyMessage.MESSAGE_TYPE:
case DatabaseStoreMessage.MESSAGE_TYPE:
if (_log.shouldLog(Log.DEBUG))
_log.debug("Ordering by fastest");
return orderByFastest(bids, msg);
default:
if (_log.shouldLog(Log.DEBUG))
_log.debug("Ordering by bandwidth");
return orderByBandwidth(bids, msg);
}
}
private int getCost(RouterInfo target, String transportStyle) {
for (Iterator iter = target.getAddresses().iterator(); iter.hasNext();) {
RouterAddress addr = (RouterAddress)iter.next();
if (addr.getTransportStyle().equals(transportStyle))
return addr.getCost();
}
return 1;
}
private List orderByFastest(HashSet bids, OutNetMessage msg) {
Map ordered = new TreeMap();
for (Iterator iter = bids.iterator(); iter.hasNext(); ) {
TransportBid bid = (TransportBid)iter.next();
int cur = bid.getLatencyMs();
int cost = getCost(msg.getTarget(), bid.getTransport().getStyle());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Bid latency: " + (cur*cost) + " for transport "
+ bid.getTransport().getStyle());
while (ordered.containsKey(new Integer(cur*cost)))
cur++;
ordered.put(new Integer(cur*cost), bid);
}
List bidList = new ArrayList(ordered.size());
for (Iterator iter = ordered.keySet().iterator(); iter.hasNext(); ) {
Object k = iter.next();
bidList.add(ordered.get(k));
}
return bidList;
}
private List orderByBandwidth(HashSet bids, OutNetMessage msg) {
Map ordered = new TreeMap();
for (Iterator iter = bids.iterator(); iter.hasNext(); ) {
TransportBid bid = (TransportBid)iter.next();
int cur = bid.getBandwidthBytes();
int cost = getCost(msg.getTarget(), bid.getTransport().getStyle());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Bid size: " + (cur*cost) + " for transport " + bid.getTransport().getStyle());
while (ordered.containsKey(new Integer(cur*cost)))
cur++;
ordered.put(new Integer(cur*cost), bid);
}
List bidList = new ArrayList(ordered.size());
for (Iterator iter = ordered.keySet().iterator(); iter.hasNext(); ) {
Object k = iter.next();
bidList.add(ordered.get(k));
}
return bidList;
return null;
}
public void messageReceived(I2NPMessage message, RouterIdentity fromRouter, Hash fromRouterHash) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("I2NPMessage received: " + message.getClass().getName(), new Exception("Where did I come from again?"));
InNetMessage msg = new InNetMessage(_context);
msg.setFromRouter(fromRouter);
msg.setFromRouterHash(fromRouterHash);
msg.setMessage(message);
int num = _context.inNetMessagePool().add(msg);
int num = _context.inNetMessagePool().add(message, fromRouter, fromRouterHash);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Added to in pool: "+ num);
}

View File

@ -10,7 +10,6 @@ import net.i2p.data.Hash;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.I2NPMessageHandler;
import net.i2p.router.CommSystemFacade;
import net.i2p.router.InNetMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
@ -110,9 +109,6 @@ public class VMCommSystem extends CommSystemFacade {
try {
I2NPMessage msg = handler.readMessage(new ByteArrayInputStream(_msg));
int size = _msg.length;
InNetMessage inMsg = new InNetMessage(ReceiveJob.this.getContext());
inMsg.setFromRouterHash(_from);
inMsg.setMessage(msg);
_ctx.profileManager().messageReceived(_from, "vm", 1, size);
_ctx.statManager().addRateData("transport.receiveMessageSize", size, 1);
@ -123,7 +119,7 @@ public class VMCommSystem extends CommSystemFacade {
else
ReceiveJob.this.getContext().statManager().addRateData("transport.receiveMessageLarge", 1, 1);
_ctx.inNetMessagePool().add(inMsg);
_ctx.inNetMessagePool().add(msg, null, _from);
} catch (Exception e) {
_log.error("wtf, error reading/formatting a VM message?", e);
}

View File

@ -799,7 +799,8 @@ public class ConnectionHandler {
byte ip[] = _from.getBytes();
_rawOut.write(ip.length);
_rawOut.write(ip);
DataHelper.writeDate(_rawOut, new Date(_context.clock().now()));
DataHelper.writeLong(_rawOut, DataHelper.DATE_LENGTH, _context.clock().now());
//DataHelper.writeDate(_rawOut, new Date(_context.clock().now()));
DataHelper.writeProperties(_rawOut, null);
_rawOut.flush();

View File

@ -8,7 +8,7 @@ import net.i2p.data.DataHelper;
import net.i2p.data.DataFormatException;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.DateMessage;
import net.i2p.router.OutNetMessage;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
@ -139,13 +139,9 @@ class ConnectionRunner implements Runnable {
*
*/
private I2NPMessage buildTimeMessage() {
// holy crap this is a kludge - strapping ourselves into a
// deliveryStatusMessage
DeliveryStatusMessage tm = new DeliveryStatusMessage(_context);
tm.setArrival(new Date(_context.clock().now()));
tm.setMessageId(0);
tm.setUniqueId(0);
return tm;
DateMessage dm = new DateMessage(_context);
dm.setNow(_context.clock().now());
return dm;
}
/**

View File

@ -5,7 +5,7 @@ import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.I2NPMessageReader;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.DateMessage;
import net.i2p.router.Router;
import net.i2p.util.Log;
@ -39,13 +39,11 @@ public class MessageHandler implements I2NPMessageReader.I2NPMessageEventListene
_log.debug("Just received message " + message.getUniqueId() + " from "
+ _identHash.toBase64().substring(0,6)
+ " readTime = " + msToRead + "ms type = " + message.getClass().getName());
if (message instanceof DeliveryStatusMessage) {
DeliveryStatusMessage msg = (DeliveryStatusMessage)message;
if ( (msg.getMessageId() == 0) && (msg.getUniqueId() == 0) ) {
timeMessageReceived(msg.getArrival().getTime());
// dont propogate the message, its just a fake
return;
}
if (message instanceof DateMessage) {
DateMessage msg = (DateMessage)message;
timeMessageReceived(msg.getNow());
// dont propogate the message, its just a fake
return;
}
_transport.messageReceived(message, _ident, _identHash, msToRead, size);
}

View File

@ -173,10 +173,15 @@ public class TCPConnection {
List expired = null;
int remaining = 0;
long remainingSize = 0;
long curSize = msg.getMessageSize(); // so we don't serialize while locked
synchronized (_pendingMessages) {
_pendingMessages.add(msg);
expired = locked_expireOld();
locked_throttle();
List throttled = locked_throttle();
if (expired == null)
expired = throttled;
else if (throttled != null)
expired.addAll(throttled);
for (int i = 0; i < _pendingMessages.size(); i++) {
OutNetMessage cur = (OutNetMessage)_pendingMessages.get(i);
remaining++;
@ -200,16 +205,17 @@ public class TCPConnection {
}
private boolean shouldDropProbabalistically() {
return Boolean.valueOf(_context.getProperty("tcp.dropProbabalistically", "true")).booleanValue();
return Boolean.valueOf(_context.getProperty("tcp.dropProbabalistically", "false")).booleanValue();
}
/**
* Implement a probabalistic dropping of messages on the queue to the
* peer along the lines of RFC2309.
*
* @return list of OutNetMessages that were expired, or null
*/
private void locked_throttle() {
if (!shouldDropProbabalistically()) return;
private List locked_throttle() {
if (!shouldDropProbabalistically()) return null;
int bytesQueued = 0;
long earliestExpiration = -1;
for (int i = 0; i < _pendingMessages.size(); i++) {
@ -230,7 +236,9 @@ public class TCPConnection {
// drop more than is necessary (leaving a fraction of the queue 'free' for bursts)
long excessQueued = (long)(bytesQueued - ((double)bytesSendableUntilFirstExpire * (1.0-getQueueFreeFactor())));
if ( (excessQueued > 0) && (_pendingMessages.size() > 1) && (_transport != null) )
locked_probabalisticDrop(excessQueued);
return locked_probabalisticDrop(excessQueued);
else
return null;
}
/**
@ -279,7 +287,8 @@ public class TCPConnection {
* Probabalistically drop messages in relation to their size vs how much
* we've exceeded our target queue usage.
*/
private void locked_probabalisticDrop(long excessBytesQueued) {
private List locked_probabalisticDrop(long excessBytesQueued) {
List rv = null;
for (int i = 0; i < _pendingMessages.size() && excessBytesQueued > 0; i++) {
OutNetMessage msg = (OutNetMessage)_pendingMessages.get(i);
int p = getDropProbability(msg.getMessageSize(), excessBytesQueued);
@ -287,13 +296,17 @@ public class TCPConnection {
_pendingMessages.remove(i);
i--;
msg.timestamp("Probabalistically dropped due to queue size " + excessBytesQueued);
sent(msg, false, -1);
if (rv == null)
rv = new ArrayList(1);
rv.add(msg);
//sent(msg, false, -1);
_context.statManager().addRateData("tcp.probabalisticDropQueueSize", excessBytesQueued, msg.getLifetime());
// since we've already dropped down this amount, lets reduce the
// number of additional messages dropped
excessBytesQueued -= msg.getMessageSize();
}
}
return rv;
}
private int getDropProbability(long msgSize, long excessBytesQueued) {

View File

@ -20,6 +20,7 @@ import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.TransportImpl;
import net.i2p.router.transport.TransportBid;
import net.i2p.router.transport.Transport;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
@ -67,6 +68,9 @@ public class TCPTransport extends TransportImpl {
/** All of the operating TCPConnectionEstablisher objects */
private List _connectionEstablishers;
private TransportBid _fastBid;
private TransportBid _slowBid;
/** What is this transport's identifier? */
public static final String STYLE = "TCP";
/** Should the TCP listener bind to all interfaces? */
@ -83,7 +87,7 @@ public class TCPTransport extends TransportImpl {
public static final int DEFAULT_ESTABLISHERS = 3;
/** Ordered list of supported I2NP protocols */
public static final int[] SUPPORTED_PROTOCOLS = new int[] { 1 };
public static final int[] SUPPORTED_PROTOCOLS = new int[] { 2 };
/** blah, people shouldnt use defaults... */
public static final int DEFAULT_LISTEN_PORT = 8887;
@ -101,6 +105,8 @@ public class TCPTransport extends TransportImpl {
_connectionLock = new Object();
_pendingMessages = new HashMap(16);
_lastConnectionErrors = new ArrayList();
_fastBid = new SharedBid(200);
_slowBid = new SharedBid(5000);
String str = _context.getProperty(PROP_ESTABLISHERS);
int establishers = 0;
@ -134,19 +140,11 @@ public class TCPTransport extends TransportImpl {
if ( (_myAddress != null) && (_myAddress.equals(addr)) )
return null; // dont talk to yourself
TransportBid bid = new TransportBid();
bid.setBandwidthBytes((int)dataSize);
bid.setExpiration(_context.clock().now() + 30*1000);
bid.setMessageSize((int)dataSize);
bid.setRouter(toAddress);
bid.setTransport(this);
int latency = 200;
if (!getIsConnected(toAddress.getIdentity()))
latency += 5000;
bid.setLatencyMs(latency);
return bid;
if (getIsConnected(toAddress.getIdentity()))
return _fastBid;
else
return _slowBid;
}
private boolean getIsConnected(RouterIdentity ident) {
@ -174,8 +172,8 @@ public class TCPTransport extends TransportImpl {
TCPConnection con = null;
boolean newPeer = false;
Hash peer = msg.getTarget().getIdentity().calculateHash();
synchronized (_connectionLock) {
Hash peer = msg.getTarget().getIdentity().calculateHash();
con = (TCPConnection)_connectionsByIdent.get(peer);
if (con == null) {
if (_log.shouldLog(Log.DEBUG)) {
@ -196,10 +194,10 @@ public class TCPTransport extends TransportImpl {
newPeer = true;
}
msgs.add(msg);
if (newPeer)
_connectionLock.notifyAll();
}
if (newPeer)
_connectionLock.notifyAll();
}
if (con != null)
@ -815,5 +813,14 @@ public class TCPTransport extends TransportImpl {
return buf.toString();
}
/**
* Cache the bid to reduce object churn
*/
private class SharedBid extends TransportBid {
private int _ms;
public SharedBid(int ms) { _ms = ms; }
public int getLatency() { return _ms; }
public Transport getTransport() { return TCPTransport.this; }
}
}

View File

@ -5,13 +5,16 @@ import java.util.HashMap;
import java.util.Map;
import net.i2p.I2PAppContext;
import net.i2p.crypto.SHA256EntryCache;
import net.i2p.data.Base64;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.I2NPMessageException;
import net.i2p.data.i2np.I2NPMessageHandler;
import net.i2p.util.ByteCache;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer;
@ -27,14 +30,22 @@ public class FragmentHandler {
private Map _fragmentedMessages;
private DefragmentedReceiver _receiver;
/** don't wait more than 20s to defragment the partial message */
private static final long MAX_DEFRAGMENT_TIME = 20*1000;
/** don't wait more than 60s to defragment the partial message */
private static final long MAX_DEFRAGMENT_TIME = 60*1000;
public FragmentHandler(I2PAppContext context, DefragmentedReceiver receiver) {
_context = context;
_log = context.logManager().getLog(FragmentHandler.class);
_fragmentedMessages = new HashMap(4);
_receiver = receiver;
_context.statManager().createRateStat("tunnel.smallFragments", "How many pad bytes are in small fragments?",
"Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 });
_context.statManager().createRateStat("tunnel.fullFragments", "How many tunnel messages use the full data area?",
"Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 });
_context.statManager().createRateStat("tunnel.fragmentedComplete", "How many fragments were in a completely received message?",
"Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 });
_context.statManager().createRateStat("tunnel.fragmentedDropped", "How many fragments were in a partially received yet failed message?",
"Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 });
}
/**
@ -47,7 +58,8 @@ public class FragmentHandler {
public void receiveTunnelMessage(byte preprocessed[], int offset, int length) {
boolean ok = verifyPreprocessed(preprocessed, offset, length);
if (!ok) {
_log.error("Unable to verify preprocessed data");
_log.error("Unable to verify preprocessed data (pre.length=" + preprocessed.length
+ " off=" +offset + " len=" + length, new Exception("failed"));
return;
}
offset += HopProcessor.IV_LENGTH; // skip the IV
@ -60,17 +72,20 @@ public class FragmentHandler {
offset++; // skip the final 0x00, terminating the padding
if (_log.shouldLog(Log.DEBUG)) {
_log.debug("Fragments begin at offset=" + offset + " padding=" + padding);
_log.debug("fragments: " + Base64.encode(preprocessed, offset, preprocessed.length-offset));
//_log.debug("fragments: " + Base64.encode(preprocessed, offset, preprocessed.length-offset));
}
try {
while (offset < length)
offset = receiveFragment(preprocessed, offset, length);
} catch (Exception e) {
} catch (RuntimeException e) {
if (_log.shouldLog(Log.ERROR))
_log.error("Corrupt fragment received: offset = " + offset, e);
throw e;
}
}
private static final ByteCache _validateCache = ByteCache.getInstance(512, TrivialPreprocessor.PREPROCESSED_SIZE);
/**
* Verify that the preprocessed data hasn't been modified by checking the
* H(payload+IV)[0:3] vs preprocessed[16:19], where payload is the data
@ -87,19 +102,44 @@ public class FragmentHandler {
paddingEnd++;
paddingEnd++; // skip the last
byte preV[] = new byte[length - offset - paddingEnd + HopProcessor.IV_LENGTH];
System.arraycopy(preprocessed, offset + paddingEnd, preV, 0, preV.length - HopProcessor.IV_LENGTH);
System.arraycopy(preprocessed, 0, preV, preV.length - HopProcessor.IV_LENGTH, HopProcessor.IV_LENGTH);
Hash v = _context.sha().calculateHash(preV);
ByteArray ba = _validateCache.acquire(); // larger than necessary, but always sufficient
byte preV[] = ba.getData();
int validLength = length - offset - paddingEnd + HopProcessor.IV_LENGTH;
System.arraycopy(preprocessed, offset + paddingEnd, preV, 0, validLength - HopProcessor.IV_LENGTH);
System.arraycopy(preprocessed, 0, preV, validLength - HopProcessor.IV_LENGTH, HopProcessor.IV_LENGTH);
if (_log.shouldLog(Log.DEBUG))
_log.debug("endpoint IV: " + Base64.encode(preV, validLength - HopProcessor.IV_LENGTH, HopProcessor.IV_LENGTH));
SHA256EntryCache.CacheEntry cache = _context.sha().cache().acquire(TrivialPreprocessor.PREPROCESSED_SIZE);
Hash v = _context.sha().calculateHash(preV, 0, validLength, cache);
//Hash v = _context.sha().calculateHash(preV, 0, validLength);
boolean eq = DataHelper.eq(v.getData(), 0, preprocessed, offset + HopProcessor.IV_LENGTH, 4);
if (!eq)
_log.error("Endpoint data doesn't match:\n" + Base64.encode(preprocessed, offset + paddingEnd, preV.length-HopProcessor.IV_LENGTH));
if (!eq) {
if (_log.shouldLog(Log.ERROR))
_log.error("Endpoint data doesn't match: # pad bytes: " + (paddingEnd-(HopProcessor.IV_LENGTH+4)-1));
if (_log.shouldLog(Log.DEBUG))
_log.debug("nomatching endpoint: # pad bytes: " + (paddingEnd-(HopProcessor.IV_LENGTH+4)-1) + "\n"
+ Base64.encode(preprocessed, offset + paddingEnd, preV.length-HopProcessor.IV_LENGTH));
}
_context.sha().cache().release(cache);
_validateCache.release(ba);
if (eq) {
int excessPadding = paddingEnd - (HopProcessor.IV_LENGTH + 4 + 1);
if (excessPadding > 0) // suboptimal fragmentation
_context.statManager().addRateData("tunnel.smallFragments", excessPadding, 0);
else
_context.statManager().addRateData("tunnel.fullFragments", 1, 0);
}
return eq;
}
/** is this a follw up byte? */
static final byte MASK_IS_SUBSEQUENT = (byte)(1 << 7);
/** how should this be delivered? shift this 5 the right and get TYPE_* */
/** how should this be delivered. shift this 5 the right and get TYPE_* */
static final byte MASK_TYPE = (byte)(3 << 5);
/** is this the first of a fragmented message? */
static final byte MASK_FRAGMENTED = (byte)(1 << 3);
@ -157,7 +197,9 @@ public class FragmentHandler {
messageId = DataHelper.fromLong(preprocessed, offset, 4);
if (_log.shouldLog(Log.DEBUG))
_log.debug("reading messageId " + messageId + " at offset "+ offset
+ " type = " + type + "tunnelId = " + tunnelId);
+ " type = " + type + " router = "
+ (router != null ? router.toBase64().substring(0,4) : "n/a")
+ " tunnelId = " + tunnelId);
offset += 4;
}
if (extended) {
@ -184,13 +226,6 @@ public class FragmentHandler {
msg = new FragmentedMessage(_context);
}
if (isNew && fragmented) {
RemoveFailed evt = new RemoveFailed(msg);
msg.setExpireEvent(evt);
_log.debug("In " + MAX_DEFRAGMENT_TIME + " dropping " + messageId);
SimpleTimer.getInstance().addEvent(evt, MAX_DEFRAGMENT_TIME);
}
msg.receive(messageId, preprocessed, offset, size, !fragmented, router, tunnelId);
if (msg.isComplete()) {
if (fragmented) {
@ -201,6 +236,16 @@ public class FragmentHandler {
if (msg.getExpireEvent() != null)
SimpleTimer.getInstance().removeEvent(msg.getExpireEvent());
receiveComplete(msg);
} else {
noteReception(msg.getMessageId(), 0);
}
if (isNew && fragmented && !msg.isComplete()) {
RemoveFailed evt = new RemoveFailed(msg);
msg.setExpireEvent(evt);
if (_log.shouldLog(Log.DEBUG))
_log.debug("In " + MAX_DEFRAGMENT_TIME + " dropping " + messageId);
SimpleTimer.getInstance().addEvent(evt, MAX_DEFRAGMENT_TIME);
}
offset += size;
@ -225,6 +270,10 @@ public class FragmentHandler {
int size = (int)DataHelper.fromLong(preprocessed, offset, 2);
offset += 2;
if (messageId < 0)
throw new RuntimeException("Preprocessed message was invalid [messageId =" + messageId + " size="
+ size + " offset=" + offset + " fragment=" + fragmentNum);
boolean isNew = false;
FragmentedMessage msg = null;
synchronized (_fragmentedMessages) {
@ -236,13 +285,6 @@ public class FragmentHandler {
}
}
if (isNew) {
RemoveFailed evt = new RemoveFailed(msg);
msg.setExpireEvent(evt);
_log.debug("In " + MAX_DEFRAGMENT_TIME + " dropping " + msg.getMessageId() + "/" + fragmentNum);
SimpleTimer.getInstance().addEvent(evt, MAX_DEFRAGMENT_TIME);
}
msg.receive(messageId, fragmentNum, preprocessed, offset, size, isLast);
if (msg.isComplete()) {
@ -251,7 +293,18 @@ public class FragmentHandler {
}
if (msg.getExpireEvent() != null)
SimpleTimer.getInstance().removeEvent(msg.getExpireEvent());
_context.statManager().addRateData("tunnel.fragmentedComplete", msg.getFragmentCount(), msg.getLifetime());
receiveComplete(msg);
} else {
noteReception(msg.getMessageId(), fragmentNum);
}
if (isNew && !msg.isComplete()) {
RemoveFailed evt = new RemoveFailed(msg);
msg.setExpireEvent(evt);
if (_log.shouldLog(Log.DEBUG))
_log.debug("In " + MAX_DEFRAGMENT_TIME + " dropping " + msg.getMessageId() + "/" + fragmentNum);
SimpleTimer.getInstance().addEvent(evt, MAX_DEFRAGMENT_TIME);
}
offset += size;
@ -265,16 +318,21 @@ public class FragmentHandler {
_log.debug("RECV(" + data.length + "): " + Base64.encode(data)
+ " " + _context.sha().calculateHash(data).toBase64());
I2NPMessage m = new I2NPMessageHandler(_context).readMessage(data);
noteCompletion(m.getUniqueId());
_receiver.receiveComplete(m, msg.getTargetRouter(), msg.getTargetTunnel());
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error receiving fragmented message (corrupt?): " + msg, ioe);
if (_log.shouldLog(Log.ERROR))
_log.error("Error receiving fragmented message (corrupt?): " + msg, ioe);
} catch (I2NPMessageException ime) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error receiving fragmented message (corrupt?): " + msg, ime);
if (_log.shouldLog(Log.ERROR))
_log.error("Error receiving fragmented message (corrupt?): " + msg, ime);
}
}
protected void noteReception(long messageId, int fragmentId) {}
protected void noteCompletion(long messageId) {}
protected void noteFailure(long messageId) {}
/**
* Receive messages out of the tunnel endpoint. There should be a single
* instance of this object per tunnel so that it can tell what tunnel various
@ -303,9 +361,12 @@ public class FragmentHandler {
synchronized (_fragmentedMessages) {
removed = (null != _fragmentedMessages.remove(new Long(_msg.getMessageId())));
}
if (removed) {
if (_log.shouldLog(Log.WARN))
_log.warn("Dropped failed fragmented message: " + _msg);
if (removed && !_msg.getReleased()) {
noteFailure(_msg.getMessageId());
if (_log.shouldLog(Log.ERROR))
_log.error("Dropped failed fragmented message: " + _msg);
_context.statManager().addRateData("tunnel.fragmentedDropped", _msg.getFragmentCount(), _msg.getLifetime());
_msg.failed();
} else {
// succeeded before timeout
}

View File

@ -37,7 +37,7 @@ public class FragmentTest {
_context.random().nextBytes(data);
m.setData(data);
m.setUniqueId(42);
m.setMessageExpiration(new Date(_context.clock().now() + 60*1000));
m.setMessageExpiration(_context.clock().now() + 60*1000);
ArrayList messages = new ArrayList();
TunnelGateway.Pending pending = new TunnelGateway.Pending(m, null, null);
messages.add(pending);
@ -61,7 +61,7 @@ public class FragmentTest {
_context.random().nextBytes(data);
m.setData(data);
m.setUniqueId(42);
m.setMessageExpiration(new Date(_context.clock().now() + 60*1000));
m.setMessageExpiration(_context.clock().now() + 60*1000);
ArrayList messages = new ArrayList();
TunnelGateway.Pending pending = new TunnelGateway.Pending(m, null, null);
messages.add(pending);
@ -86,7 +86,7 @@ public class FragmentTest {
_context.random().nextBytes(data);
m.setData(data);
m.setUniqueId(42);
m.setMessageExpiration(new Date(_context.clock().now() + 60*1000));
m.setMessageExpiration(_context.clock().now() + 60*1000);
ArrayList messages = new ArrayList();
TunnelGateway.Pending pending = new TunnelGateway.Pending(m, null, null);
messages.add(pending);

View File

@ -10,11 +10,13 @@ import java.util.Date;
import net.i2p.I2PAppContext;
import net.i2p.data.Base64;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.I2NPMessageHandler;
import net.i2p.util.ByteCache;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer;
@ -29,23 +31,31 @@ public class FragmentedMessage {
private long _messageId;
private Hash _toRouter;
private TunnelId _toTunnel;
private Map _fragments;
private ByteArray _fragments[];
private boolean _lastReceived;
private int _highFragmentNum;
private long _createdOn;
private boolean _completed;
private long _releasedAfter;
private SimpleTimer.TimedEvent _expireEvent;
private static final ByteCache _cache = ByteCache.getInstance(512, TrivialPreprocessor.PREPROCESSED_SIZE);
// 64 is pretty absurd, 32 is too, most likely
private static final int MAX_FRAGMENTS = 64;
public FragmentedMessage(I2PAppContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(FragmentedMessage.class);
_messageId = -1;
_toRouter = null;
_toTunnel = null;
_fragments = new HashMap(1);
_fragments = new ByteArray[MAX_FRAGMENTS];
_lastReceived = false;
_highFragmentNum = -1;
_releasedAfter = -1;
_createdOn = ctx.clock().now();
_expireEvent = null;
_completed = false;
}
/**
@ -60,17 +70,26 @@ public class FragmentedMessage {
* @param isLast is this the last fragment in the message?
*/
public void receive(long messageId, int fragmentNum, byte payload[], int offset, int length, boolean isLast) {
if (fragmentNum < 0) throw new RuntimeException("Fragment # == " + fragmentNum + " for messageId " + messageId);
if (payload == null) throw new RuntimeException("Payload is null for messageId " + messageId);
if (length <= 0) throw new RuntimeException("Length is impossible (" + length + ") for messageId " + messageId);
if (offset + length > payload.length) throw new RuntimeException("Length is impossible (" + length + "/" + offset + " out of " + payload.length + ") for messageId " + messageId);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Receive message " + messageId + " fragment " + fragmentNum + " with " + length + " bytes (last? " + isLast + ") offset = " + offset);
_messageId = messageId;
ByteArray ba = new ByteArray(new byte[length]);
System.arraycopy(payload, offset, ba.getData(), 0, length);
_log.debug("fragment[" + fragmentNum + "/" + offset + "/" + length + "]: " + Base64.encode(ba.getData()));
// we should just use payload[] and use an offset/length on it
ByteArray ba = new ByteArray(payload, offset, length); //new byte[length]);
//System.arraycopy(payload, offset, ba.getData(), 0, length);
if (_log.shouldLog(Log.DEBUG))
_log.debug("fragment[" + fragmentNum + "/" + offset + "/" + length + "]: "
+ Base64.encode(ba.getData(), ba.getOffset(), ba.getValid()));
_fragments.put(new Integer(fragmentNum), ba);
_fragments[fragmentNum] = ba;
_lastReceived = isLast;
if (isLast)
if (fragmentNum > _highFragmentNum)
_highFragmentNum = fragmentNum;
if (isLast && fragmentNum <= 0)
throw new RuntimeException("hmm, isLast and fragmentNum=" + fragmentNum + " for message " + messageId);
}
/**
@ -86,13 +105,18 @@ public class FragmentedMessage {
* @param toTunnel what tunnel is this destined for (may be null)
*/
public void receive(long messageId, byte payload[], int offset, int length, boolean isLast, Hash toRouter, TunnelId toTunnel) {
if (payload == null) throw new RuntimeException("Payload is null for messageId " + messageId);
if (length <= 0) throw new RuntimeException("Length is impossible (" + length + ") for messageId " + messageId);
if (offset + length > payload.length) throw new RuntimeException("Length is impossible (" + length + "/" + offset + " out of " + payload.length + ") for messageId " + messageId);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Receive message " + messageId + " with " + length + " bytes (last? " + isLast + ") targetting " + toRouter + " / " + toTunnel + " offset=" + offset);
_messageId = messageId;
ByteArray ba = new ByteArray(new byte[length]);
System.arraycopy(payload, offset, ba.getData(), 0, length);
_log.debug("fragment[0/" + offset + "/" + length + "]: " + Base64.encode(ba.getData()));
_fragments.put(new Integer(0), ba);
ByteArray ba = new ByteArray(payload, offset, length); // new byte[length]);
//System.arraycopy(payload, offset, ba.getData(), 0, length);
if (_log.shouldLog(Log.DEBUG))
_log.debug("fragment[0/" + offset + "/" + length + "]: "
+ Base64.encode(ba.getData(), ba.getOffset(), ba.getValid()));
_fragments[0] = ba;
_lastReceived = isLast;
_toRouter = toRouter;
_toTunnel = toTunnel;
@ -103,6 +127,13 @@ public class FragmentedMessage {
public long getMessageId() { return _messageId; }
public Hash getTargetRouter() { return _toRouter; }
public TunnelId getTargetTunnel() { return _toTunnel; }
public int getFragmentCount() {
int found = 0;
for (int i = 0; i < _fragments.length; i++)
if (_fragments[i] != null)
found++;
return found;
}
/** used in the fragment handler so we can cancel the expire event on success */
SimpleTimer.TimedEvent getExpireEvent() { return _expireEvent; }
void setExpireEvent(SimpleTimer.TimedEvent evt) { _expireEvent = evt; }
@ -112,7 +143,7 @@ public class FragmentedMessage {
if (!_lastReceived)
return false;
for (int i = 0; i <= _highFragmentNum; i++)
if (!_fragments.containsKey(new Integer(i)))
if (_fragments[i] == null)
return false;
return true;
}
@ -121,35 +152,59 @@ public class FragmentedMessage {
throw new IllegalStateException("wtf, don't get the completed size when we're not complete");
int size = 0;
for (int i = 0; i <= _highFragmentNum; i++) {
ByteArray ba = (ByteArray)_fragments.get(new Integer(i));
size += ba.getData().length;
ByteArray ba = _fragments[i];
size += ba.getValid();
}
return size;
}
/** how long has this fragmented message been alive? */
public long getLifetime() { return _context.clock().now() - _createdOn; }
public boolean getReleased() { return _completed; }
public void writeComplete(OutputStream out) throws IOException {
for (int i = 0; i <= _highFragmentNum; i++) {
ByteArray ba = (ByteArray)_fragments.get(new Integer(i));
out.write(ba.getData());
ByteArray ba = _fragments[i];
out.write(ba.getData(), ba.getOffset(), ba.getValid());
}
_completed = true;
}
public void writeComplete(byte target[], int offset) {
for (int i = 0; i <= _highFragmentNum; i++) {
ByteArray ba = (ByteArray)_fragments.get(new Integer(i));
System.arraycopy(ba.getData(), 0, target, offset, ba.getData().length);
offset += ba.getData().length;
ByteArray ba = _fragments[i];
System.arraycopy(ba.getData(), ba.getOffset(), target, offset, ba.getValid());
offset += ba.getValid();
}
_completed = true;
}
public byte[] toByteArray() {
byte rv[] = new byte[getCompleteSize()];
writeComplete(rv, 0);
releaseFragments();
return rv;
}
public long getReleasedAfter() { return _releasedAfter; }
public void failed() {
releaseFragments();
}
/**
* Called as one of the endpoints for the tunnel cache pipeline (see TunnelDataMessage)
*
*/
private void releaseFragments() {
_releasedAfter = getLifetime();
for (int i = 0; i <= _highFragmentNum; i++) {
ByteArray ba = _fragments[i];
if ( (ba != null) && (ba.getData().length == TrivialPreprocessor.PREPROCESSED_SIZE) ) {
_cache.release(ba);
_fragments[i] = null;
}
}
}
public InputStream getInputStream() { return new FragmentInputStream(); }
private class FragmentInputStream extends InputStream {
private int _fragment;
@ -160,13 +215,13 @@ public class FragmentedMessage {
}
public int read() throws IOException {
while (true) {
ByteArray ba = (ByteArray)_fragments.get(new Integer(_fragment));
ByteArray ba = _fragments[_fragment];
if (ba == null) return -1;
if (_offset >= ba.getData().length) {
if (_offset >= ba.getValid()) {
_fragment++;
_offset = 0;
} else {
byte rv = ba.getData()[_offset];
byte rv = ba.getData()[ba.getOffset()+_offset];
_offset++;
return rv;
}
@ -174,13 +229,38 @@ public class FragmentedMessage {
}
}
public String toString() {
StringBuffer buf = new StringBuffer(128);
buf.append("Fragments for ").append(_messageId).append(": ");
for (int i = 0; i <= _highFragmentNum; i++) {
ByteArray ba = _fragments[i];
if (ba != null)
buf.append(i).append(":").append(ba.getValid()).append(" bytes ");
else
buf.append(i).append(": missing ");
}
buf.append(" highest received: ").append(_highFragmentNum);
buf.append(" last received? ").append(_lastReceived);
buf.append(" lifetime: ").append(DataHelper.formatDuration(_context.clock().now()-_createdOn));
if (_toRouter != null) {
buf.append(" targetting ").append(_toRouter.toBase64().substring(0,4));
if (_toTunnel != null)
buf.append(":").append(_toTunnel.getTunnelId());
}
if (_completed)
buf.append(" completed");
if (_releasedAfter > 0)
buf.append(" released after " + DataHelper.formatDuration(_releasedAfter));
return buf.toString();
}
public static void main(String args[]) {
try {
I2PAppContext ctx = I2PAppContext.getGlobalContext();
DataMessage m = new DataMessage(ctx);
m.setData(new byte[1024]);
java.util.Arrays.fill(m.getData(), (byte)0xFF);
m.setMessageExpiration(new Date(ctx.clock().now() + 60*1000));
m.setMessageExpiration(ctx.clock().now() + 60*1000);
m.setUniqueId(ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE));
byte data[] = m.toByteArray();

View File

@ -2,8 +2,10 @@ package net.i2p.router.tunnel;
import java.util.Map;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import net.i2p.data.TunnelId;
/**
* Defines the general configuration for a hop in a tunnel.
@ -11,13 +13,16 @@ import net.i2p.data.SessionKey;
*/
public class HopConfig {
private byte _receiveTunnelId[];
private TunnelId _receiveTunnel;
private Hash _receiveFrom;
private byte _sendTunnelId[];
private TunnelId _sendTunnel;
private Hash _sendTo;
private SessionKey _layerKey;
private SessionKey _ivKey;
private long _expiration;
private Map _options;
private long _messagesProcessed;
public HopConfig() {
_receiveTunnelId = null;
@ -32,7 +37,13 @@ public class HopConfig {
/** what tunnel ID are we receiving on? */
public byte[] getReceiveTunnelId() { return _receiveTunnelId; }
public TunnelId getReceiveTunnel() {
if (_receiveTunnel == null)
_receiveTunnel = getTunnel(_receiveTunnelId);
return _receiveTunnel;
}
public void setReceiveTunnelId(byte id[]) { _receiveTunnelId = id; }
public void setReceiveTunnelId(TunnelId id) { _receiveTunnelId = DataHelper.toLong(4, id.getTunnelId()); }
/** what is the previous peer in the tunnel (if any)? */
public Hash getReceiveFrom() { return _receiveFrom; }
@ -40,8 +51,20 @@ public class HopConfig {
/** what is the next tunnel ID we are sending to? */
public byte[] getSendTunnelId() { return _sendTunnelId; }
public TunnelId getSendTunnel() {
if (_sendTunnel == null)
_sendTunnel = getTunnel(_sendTunnelId);
return _sendTunnel;
}
public void setSendTunnelId(byte id[]) { _sendTunnelId = id; }
private TunnelId getTunnel(byte id[]) {
if (id == null)
return null;
else
return new TunnelId(DataHelper.fromLong(id, 0, id.length));
}
/** what is the next peer in the tunnel (if any)? */
public Hash getSendTo() { return _sendTo; }
public void setSendTo(Hash to) { _sendTo = to; }
@ -59,7 +82,7 @@ public class HopConfig {
public void setExpiration(long when) { _expiration = when; }
/**
* what are the configuration options for this tunnel (if any)? keys to
* what are the configuration options for this tunnel (if any). keys to
* this map should be strings and values should be Objects of an
* option-specific type (e.g. "maxMessages" would be an Integer, "shouldPad"
* would be a Boolean, etc).
@ -67,4 +90,27 @@ public class HopConfig {
*/
public Map getOptions() { return _options; }
public void setOptions(Map options) { _options = options; }
/** take note of a message being pumped through this tunnel */
public void incrementProcessedMessages() { _messagesProcessed++; }
public long getProcessedMessagesCount() { return _messagesProcessed; }
public String toString() {
StringBuffer buf = new StringBuffer(64);
if (_receiveTunnelId != null) {
buf.append("recv on ");
buf.append(DataHelper.fromLong(_receiveTunnelId, 0, 4));
buf.append(" ");
}
if (_sendTo != null) {
buf.append("send to ").append(_sendTo.toBase64().substring(0,4)).append(":");
if (_sendTunnelId != null)
buf.append(DataHelper.fromLong(_sendTunnelId, 0, 4));
}
buf.append(" expiring on ").append(TunnelCreatorConfig.format(_expiration));
buf.append(" having transferred ").append(_messagesProcessed).append("KB");
return buf.toString();
}
}

View File

@ -2,8 +2,10 @@ package net.i2p.router.tunnel;
import net.i2p.I2PAppContext;
import net.i2p.data.Base64;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.ByteCache;
import net.i2p.util.Log;
/**
@ -24,15 +26,19 @@ public class HopProcessor {
/** helpful flag for debugging */
static final boolean USE_ENCRYPTION = true;
static final int IV_LENGTH = 16;
private static final ByteCache _cache = ByteCache.getInstance(128, IV_LENGTH);
public HopProcessor(I2PAppContext ctx, HopConfig config) {
this(ctx, config, createValidator());
}
public HopProcessor(I2PAppContext ctx, HopConfig config, IVValidator validator) {
_context = ctx;
_log = ctx.logManager().getLog(HopProcessor.class);
_config = config;
_validator = createValidator();
_validator = validator;
}
protected IVValidator createValidator() {
protected static IVValidator createValidator() {
// yeah, we'll use an O(1) validator later (e.g. bloom filter)
return new HashSetIVValidator();
}
@ -61,7 +67,8 @@ public class HopProcessor {
}
}
byte iv[] = new byte[IV_LENGTH];
ByteArray ba = _cache.acquire();
byte iv[] = ba.getData(); // new byte[IV_LENGTH];
System.arraycopy(orig, offset, iv, 0, IV_LENGTH);
boolean okIV = _validator.receiveIV(iv);
if (!okIV) {
@ -82,6 +89,7 @@ public class HopProcessor {
//_log.debug("Data after processing: " + Base64.encode(orig, IV_LENGTH, orig.length - IV_LENGTH));
//_log.debug("IV sent: " + Base64.encode(orig, 0, IV_LENGTH));
}
_cache.release(ba);
return true;
}

View File

@ -1,8 +1,5 @@
package net.i2p.router.tunnel;
import java.util.HashSet;
import net.i2p.data.ByteArray;
/**
* Provide a generic interface for IV validation which may be implemented
* through something as simple as a hashtable or more a complicated
@ -17,29 +14,3 @@ public interface IVValidator {
*/
public boolean receiveIV(byte iv[]);
}
/** accept everything */
class DummyValidator implements IVValidator {
private static final DummyValidator _instance = new DummyValidator();
public static DummyValidator getInstance() { return _instance; }
private DummyValidator() {}
public boolean receiveIV(byte[] iv) { return true; }
}
/** waste lots of RAM */
class HashSetIVValidator implements IVValidator {
private HashSet _received;
public HashSetIVValidator() {
_received = new HashSet();
}
public boolean receiveIV(byte[] iv) {
ByteArray ba = new ByteArray(iv);
boolean isNew = false;
synchronized (_received) {
isNew = _received.add(ba);
}
return isNew;
}
}

View File

@ -2,8 +2,10 @@ package net.i2p.router.tunnel;
import net.i2p.I2PAppContext;
import net.i2p.data.Base64;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.ByteCache;
import net.i2p.util.Log;
/**
@ -21,14 +23,21 @@ public class InboundEndpointProcessor {
private IVValidator _validator;
static final boolean USE_ENCRYPTION = HopProcessor.USE_ENCRYPTION;
private static final ByteCache _cache = ByteCache.getInstance(128, HopProcessor.IV_LENGTH);
public InboundEndpointProcessor(I2PAppContext ctx, TunnelCreatorConfig cfg) {
this(ctx, cfg, DummyValidator.getInstance());
}
public InboundEndpointProcessor(I2PAppContext ctx, TunnelCreatorConfig cfg, IVValidator validator) {
_context = ctx;
_log = ctx.logManager().getLog(InboundEndpointProcessor.class);
_config = cfg;
_validator = DummyValidator.getInstance();
_validator = validator;
}
public Hash getDestination() { return _config.getDestination(); }
public TunnelCreatorConfig getConfig() { return _config; }
/**
* Undo all of the encryption done by the peers in the tunnel, recovering the
* preprocessed data sent by the gateway.
@ -37,7 +46,7 @@ public class InboundEndpointProcessor {
* if it was a duplicate or from the wrong peer.
*/
public boolean retrievePreprocessedData(byte orig[], int offset, int length, Hash prev) {
Hash last = _config.getPeer(_config.getLength()-1);
Hash last = _config.getPeer(_config.getLength()-2);
if (!last.equals(prev)) {
if (_log.shouldLog(Log.ERROR))
_log.error("Invalid previous peer - attempted hostile loop? from " + prev
@ -45,19 +54,40 @@ public class InboundEndpointProcessor {
return false;
}
byte iv[] = new byte[HopProcessor.IV_LENGTH];
ByteArray ba = _cache.acquire();
byte iv[] = ba.getData(); //new byte[HopProcessor.IV_LENGTH];
System.arraycopy(orig, offset, iv, 0, iv.length);
//if (_config.getLength() > 1)
// _log.debug("IV at inbound endpoint before decrypt: " + Base64.encode(iv));
boolean ok = _validator.receiveIV(iv);
if (!ok) {
if (_log.shouldLog(Log.WARN))
_log.warn("Invalid IV received");
_cache.release(ba);
return false;
}
// inbound endpoints and outbound gateways have to undo the crypto in the same way
if (USE_ENCRYPTION)
OutboundGatewayProcessor.decrypt(_context, _config, iv, orig, offset, length);
decrypt(_context, _config, iv, orig, offset, length);
_cache.release(ba);
return true;
}
private void decrypt(I2PAppContext ctx, TunnelCreatorConfig cfg, byte iv[], byte orig[], int offset, int length) {
Log log = ctx.logManager().getLog(OutboundGatewayProcessor.class);
ByteArray ba = _cache.acquire();
byte cur[] = ba.getData(); // new byte[HopProcessor.IV_LENGTH]; // so we dont malloc
for (int i = cfg.getLength()-2; i >= 0; i--) { // dont include the endpoint, since that is the creator
OutboundGatewayProcessor.decrypt(ctx, iv, orig, offset, length, cur, cfg.getConfig(i));
if (log.shouldLog(Log.DEBUG)) {
//log.debug("IV at hop " + i + ": " + Base64.encode(orig, offset, HopProcessor.IV_LENGTH));
//log.debug("hop " + i + ": " + Base64.encode(orig, offset + HopProcessor.IV_LENGTH, length - HopProcessor.IV_LENGTH));
}
}
_cache.release(ba);
}
}

View File

@ -10,12 +10,7 @@ import net.i2p.util.Log;
*/
public class InboundGatewayProcessor extends HopProcessor {
public InboundGatewayProcessor(I2PAppContext ctx, HopConfig config) {
super(ctx, config);
}
/** we are the gateway, no need to validate the IV */
protected IVValidator createValidator() {
return DummyValidator.getInstance();
super(ctx, config, DummyValidator.getInstance());
}
/**

View File

@ -61,7 +61,7 @@ public class InboundGatewayTest {
DataMessage m = new DataMessage(_context);
m.setData(new byte[64]);
java.util.Arrays.fill(m.getData(), (byte)0xFF);
m.setMessageExpiration(new Date(_context.clock().now() + 60*1000));
m.setMessageExpiration(_context.clock().now() + 60*1000);
m.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
_log.debug("Sending " + m.getUniqueId());
byte data[] = m.toByteArray();
@ -89,7 +89,7 @@ public class InboundGatewayTest {
DataMessage m = new DataMessage(_context);
m.setData(new byte[64]);
java.util.Arrays.fill(m.getData(), (byte)0xFF);
m.setMessageExpiration(new Date(_context.clock().now() + 60*1000));
m.setMessageExpiration(_context.clock().now() + 60*1000);
m.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
Hash to = new Hash(new byte[Hash.HASH_LENGTH]);
java.util.Arrays.fill(to.getData(), (byte)0xFF);
@ -119,7 +119,7 @@ public class InboundGatewayTest {
DataMessage m = new DataMessage(_context);
m.setData(new byte[64]);
java.util.Arrays.fill(m.getData(), (byte)0xFF);
m.setMessageExpiration(new Date(_context.clock().now() + 60*1000));
m.setMessageExpiration(_context.clock().now() + 60*1000);
m.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
Hash to = new Hash(new byte[Hash.HASH_LENGTH]);
java.util.Arrays.fill(to.getData(), (byte)0xFF);
@ -150,7 +150,7 @@ public class InboundGatewayTest {
DataMessage m = new DataMessage(_context);
m.setData(new byte[1024]);
java.util.Arrays.fill(m.getData(), (byte)0xFF);
m.setMessageExpiration(new Date(_context.clock().now() + 60*1000));
m.setMessageExpiration(_context.clock().now() + 60*1000);
m.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
_log.debug("Sending " + m.getUniqueId());
byte data[] = m.toByteArray();
@ -183,7 +183,7 @@ public class InboundGatewayTest {
public void receiveEncrypted(byte[] encrypted) {
// fake all the hops...
for (int i = 1; i <= _config.getLength() - 1; i++) {
for (int i = 1; i <= _config.getLength() - 2; i++) {
HopProcessor hop = new HopProcessor(_context, _config.getConfig(i));
boolean ok = hop.process(encrypted, 0, encrypted.length, _config.getConfig(i).getReceiveFrom());
if (!ok)
@ -194,7 +194,7 @@ public class InboundGatewayTest {
// now handle it at the endpoint
InboundEndpointProcessor end = new InboundEndpointProcessor(_context, _config);
boolean ok = end.retrievePreprocessedData(encrypted, 0, encrypted.length, _config.getPeer(_config.getLength()-1));
boolean ok = end.retrievePreprocessedData(encrypted, 0, encrypted.length, _config.getPeer(_config.getLength()-2));
if (!ok)
_log.error("Error retrieving cleartext at the endpoint");

View File

@ -40,7 +40,7 @@ public class InboundTest {
InboundGatewayProcessor p = new InboundGatewayProcessor(_context, config.getConfig(0));
p.process(message, 0, message.length, null);
for (int i = 1; i < numHops; i++) {
for (int i = 1; i < numHops-1; i++) {
HopProcessor hop = new HopProcessor(_context, config.getConfig(i));
Hash prev = config.getConfig(i).getReceiveFrom();
boolean ok = hop.process(message, 0, message.length, prev);
@ -51,7 +51,7 @@ public class InboundTest {
}
InboundEndpointProcessor end = new InboundEndpointProcessor(_context, config);
boolean ok = end.retrievePreprocessedData(message, 0, message.length, config.getPeer(numHops-1));
boolean ok = end.retrievePreprocessedData(message, 0, message.length, config.getPeer(numHops-2));
if (!ok) {
_log.error("Error retrieving cleartext at the endpoint");
try { Thread.sleep(5*1000); } catch (Exception e) {}

View File

@ -2,8 +2,10 @@ package net.i2p.router.tunnel;
import net.i2p.I2PAppContext;
import net.i2p.data.Base64;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.ByteCache;
import net.i2p.util.Log;
/**
@ -18,6 +20,7 @@ public class OutboundGatewayProcessor {
private TunnelCreatorConfig _config;
static final boolean USE_ENCRYPTION = HopProcessor.USE_ENCRYPTION;
private static final ByteCache _cache = ByteCache.getInstance(128, HopProcessor.IV_LENGTH);
public OutboundGatewayProcessor(I2PAppContext ctx, TunnelCreatorConfig cfg) {
_context = ctx;
@ -34,17 +37,21 @@ public class OutboundGatewayProcessor {
* @param length how much of orig can we write to (must be a multiple of 16).
*/
public void process(byte orig[], int offset, int length) {
byte iv[] = new byte[HopProcessor.IV_LENGTH];
ByteArray ba = _cache.acquire();
byte iv[] = ba.getData(); // new byte[HopProcessor.IV_LENGTH];
//_context.random().nextBytes(iv);
//System.arraycopy(iv, 0, orig, offset, HopProcessor.IV_LENGTH);
System.arraycopy(orig, offset, iv, 0, HopProcessor.IV_LENGTH);
if (_log.shouldLog(Log.DEBUG)) {
//_log.debug("Original random IV: " + Base64.encode(iv));
_log.debug("Orig random IV: " + Base64.encode(iv));
//_log.debug("data: " + Base64.encode(orig, iv.length, length - iv.length));
}
if (USE_ENCRYPTION)
decrypt(_context, _config, iv, orig, offset, length);
if (_log.shouldLog(Log.DEBUG))
_log.debug("finished processing the preprocessed data");
_cache.release(ba);
}
/**
@ -53,19 +60,21 @@ public class OutboundGatewayProcessor {
* and by the inbound endpoint.
*
*/
static void decrypt(I2PAppContext ctx, TunnelCreatorConfig cfg, byte iv[], byte orig[], int offset, int length) {
private void decrypt(I2PAppContext ctx, TunnelCreatorConfig cfg, byte iv[], byte orig[], int offset, int length) {
Log log = ctx.logManager().getLog(OutboundGatewayProcessor.class);
byte cur[] = new byte[HopProcessor.IV_LENGTH]; // so we dont malloc
for (int i = cfg.getLength()-1; i >= 0; i--) {
ByteArray ba = _cache.acquire();
byte cur[] = ba.getData(); // new byte[HopProcessor.IV_LENGTH]; // so we dont malloc
for (int i = cfg.getLength()-1; i >= 1; i--) { // dont include hop 0, since that is the creator
decrypt(ctx, iv, orig, offset, length, cur, cfg.getConfig(i));
if (log.shouldLog(Log.DEBUG)) {
//log.debug("IV at hop " + i + ": " + Base64.encode(orig, offset, HopProcessor.IV_LENGTH));
log.debug("IV at hop " + i + ": " + Base64.encode(orig, offset, HopProcessor.IV_LENGTH));
//log.debug("hop " + i + ": " + Base64.encode(orig, offset + HopProcessor.IV_LENGTH, length - HopProcessor.IV_LENGTH));
}
}
_cache.release(ba);
}
private static void decrypt(I2PAppContext ctx, byte iv[], byte orig[], int offset, int length, byte cur[], HopConfig config) {
static void decrypt(I2PAppContext ctx, byte iv[], byte orig[], int offset, int length, byte cur[], HopConfig config) {
// update the IV for the previous (next?) hop
ctx.aes().decryptBlock(orig, offset, config.getIVKey(), orig, offset);

View File

@ -61,7 +61,7 @@ public class OutboundGatewayTest {
DataMessage m = new DataMessage(_context);
m.setData(new byte[64]);
java.util.Arrays.fill(m.getData(), (byte)0xFF);
m.setMessageExpiration(new Date(_context.clock().now() + 60*1000));
m.setMessageExpiration(_context.clock().now() + 60*1000);
m.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
_log.debug("Sending " + m.getUniqueId());
byte data[] = m.toByteArray();
@ -89,7 +89,7 @@ public class OutboundGatewayTest {
DataMessage m = new DataMessage(_context);
m.setData(new byte[64]);
java.util.Arrays.fill(m.getData(), (byte)0xFF);
m.setMessageExpiration(new Date(_context.clock().now() + 60*1000));
m.setMessageExpiration(_context.clock().now() + 60*1000);
m.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
Hash to = new Hash(new byte[Hash.HASH_LENGTH]);
java.util.Arrays.fill(to.getData(), (byte)0xFF);
@ -119,7 +119,7 @@ public class OutboundGatewayTest {
DataMessage m = new DataMessage(_context);
m.setData(new byte[64]);
java.util.Arrays.fill(m.getData(), (byte)0xFF);
m.setMessageExpiration(new Date(_context.clock().now() + 60*1000));
m.setMessageExpiration(_context.clock().now() + 60*1000);
m.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
Hash to = new Hash(new byte[Hash.HASH_LENGTH]);
java.util.Arrays.fill(to.getData(), (byte)0xFF);
@ -150,7 +150,7 @@ public class OutboundGatewayTest {
DataMessage m = new DataMessage(_context);
m.setData(new byte[1024]);
java.util.Arrays.fill(m.getData(), (byte)0xFF);
m.setMessageExpiration(new Date(_context.clock().now() + 60*1000));
m.setMessageExpiration(_context.clock().now() + 60*1000);
m.setUniqueId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
_log.debug("Sending " + m.getUniqueId());
byte data[] = m.toByteArray();
@ -183,7 +183,7 @@ public class OutboundGatewayTest {
public void receiveEncrypted(byte[] encrypted) {
// fake all the hops...
for (int i = 0; i < _config.getLength(); i++) {
for (int i = 1; i < _config.getLength(); i++) {
HopProcessor hop = new HopProcessor(_context, _config.getConfig(i));
boolean ok = hop.process(encrypted, 0, encrypted.length, _config.getConfig(i).getReceiveFrom());
if (!ok)

View File

@ -12,6 +12,7 @@ import net.i2p.util.Log;
public class OutboundSender implements TunnelGateway.Sender {
private I2PAppContext _context;
private Log _log;
private TunnelCreatorConfig _config;
private OutboundGatewayProcessor _processor;
static final boolean USE_ENCRYPTION = HopProcessor.USE_ENCRYPTION;
@ -19,12 +20,19 @@ public class OutboundSender implements TunnelGateway.Sender {
public OutboundSender(I2PAppContext ctx, TunnelCreatorConfig config) {
_context = ctx;
_log = ctx.logManager().getLog(OutboundSender.class);
_config = config;
_processor = new OutboundGatewayProcessor(_context, config);
}
public void sendPreprocessed(byte[] preprocessed, TunnelGateway.Receiver receiver) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("preprocessed data going out " + _config + ": " + Base64.encode(preprocessed));
if (USE_ENCRYPTION)
_processor.process(preprocessed, 0, preprocessed.length);
if (_log.shouldLog(Log.DEBUG))
_log.debug("after wrapping up the preprocessed data on " + _config);
receiver.receiveEncrypted(preprocessed);
if (_log.shouldLog(Log.DEBUG))
_log.debug("after receiving on " + _config + ": receiver = " + receiver);
}
}

View File

@ -4,10 +4,13 @@ import java.util.ArrayList;
import java.util.List;
import net.i2p.I2PAppContext;
import net.i2p.crypto.SHA256EntryCache;
import net.i2p.data.Base64;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.util.ByteCache;
import net.i2p.util.Log;
/**
@ -21,8 +24,10 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
private I2PAppContext _context;
private Log _log;
private static final int PREPROCESSED_SIZE = 1024;
static final int PREPROCESSED_SIZE = 1024;
private static final int IV_SIZE = HopProcessor.IV_LENGTH;
private static final ByteCache _dataCache = ByteCache.getInstance(512, PREPROCESSED_SIZE);
private static final ByteCache _ivCache = ByteCache.getInstance(128, IV_SIZE);
public TrivialPreprocessor(I2PAppContext ctx) {
_context = ctx;
@ -35,19 +40,24 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
byte preprocessed[][] = preprocess(msg);
for (int i = 0; i < preprocessed.length; i++) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Preprocessed: " + Base64.encode(preprocessed[i]));
_log.debug("Preprocessed: fragment " + i + "/" + (preprocessed.length-1) + " in "
+ msg.getMessageId() + ": " + Base64.encode(preprocessed[i]));
sender.sendPreprocessed(preprocessed[i], rec);
}
notePreprocessing(msg.getMessageId(), preprocessed.length);
}
return false;
}
protected void notePreprocessing(long messageId, int numFragments) {}
private byte[][] preprocess(TunnelGateway.Pending msg) {
List fragments = new ArrayList(1);
while (msg.getOffset() < msg.getData().length) {
fragments.add(preprocessFragment(msg));
_log.debug("\n\nafter preprocessing fragment\n\n");
if (_log.shouldLog(Log.DEBUG))
_log.debug("\n\nafter preprocessing fragment\n\n");
}
byte rv[][] = new byte[fragments.size()][];
@ -86,10 +96,11 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
private byte[] preprocessFirstFragment(TunnelGateway.Pending msg) {
boolean fragmented = false;
byte iv[] = new byte[IV_SIZE];
ByteArray ivBuf = _ivCache.acquire();
byte iv[] = ivBuf.getData(); // new byte[IV_SIZE];
_context.random().nextBytes(iv);
byte target[] = new byte[PREPROCESSED_SIZE];
byte target[] = _dataCache.acquire().getData(); //new byte[PREPROCESSED_SIZE];
int instructionsLength = getInstructionsSize(msg);
int payloadLength = msg.getData().length;
@ -110,7 +121,8 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
if (fragmented)
target[offset] |= MASK_FRAGMENTED;
_log.debug("CONTROL: " + Integer.toHexString(target[offset]));
if (_log.shouldLog(Log.DEBUG))
_log.debug("CONTROL: " + Integer.toHexString(target[offset]));
offset++;
@ -124,29 +136,38 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
}
if (fragmented) {
DataHelper.toLong(target, offset, 4, msg.getMessageId());
_log.debug("writing messageId= " + msg.getMessageId() + " at offset " + offset);
if (_log.shouldLog(Log.DEBUG))
_log.debug("writing messageId= " + msg.getMessageId() + " at offset " + offset);
offset += 4;
}
DataHelper.toLong(target, offset, 2, payloadLength);
offset += 2;
//_log.debug("raw data : " + Base64.encode(msg.getData()));
System.arraycopy(msg.getData(), 0, target, offset, payloadLength);
_log.debug("fragment[" + msg.getFragmentNumber()+ "/" + (PREPROCESSED_SIZE - offset - payloadLength) + "/" + payloadLength + "]: " + Base64.encode(target, offset, payloadLength));
if (_log.shouldLog(Log.DEBUG))
_log.debug("initial fragment[" + msg.getMessageId() + "/" + msg.getFragmentNumber()+ "/"
+ (PREPROCESSED_SIZE - offset - payloadLength) + "/" + payloadLength + "]: "
+ Base64.encode(target, offset, payloadLength));
offset += payloadLength;
SHA256EntryCache.CacheEntry cache = _context.sha().cache().acquire(PREPROCESSED_SIZE);
// payload ready, now H(instructions+payload+IV)
System.arraycopy(iv, 0, target, offset, IV_SIZE);
Hash h = _context.sha().calculateHash(target, 0, offset + IV_SIZE);
_log.debug("before shift: " + Base64.encode(target));
Hash h = _context.sha().calculateHash(target, 0, offset + IV_SIZE, cache);
//Hash h = _context.sha().calculateHash(target, 0, offset + IV_SIZE);
//_log.debug("before shift: " + Base64.encode(target));
// now shiiiiiift
int distance = PREPROCESSED_SIZE - offset;
System.arraycopy(target, 0, target, distance, offset);
_log.debug("fragments begin at " + distance + " (size=" + payloadLength + " offset=" + offset +")");
if (_log.shouldLog(Log.DEBUG))
_log.debug(msg.getMessageId() + ": fragments begin at " + distance + " (size="
+ payloadLength + " offset=" + offset +")");
java.util.Arrays.fill(target, 0, distance, (byte)0x0);
_log.debug("after shift: " + Base64.encode(target));
//_log.debug("after shift: " + Base64.encode(target));
offset = 0;
System.arraycopy(iv, 0, target, offset, IV_SIZE);
@ -155,6 +176,9 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
offset += 4;
//_log.debug("before pad : " + Base64.encode(target));
_context.sha().cache().release(cache);
_ivCache.release(ivBuf);
if (!fragmented) {
// fits in a single message, so may be smaller than the full size
int numPadBytes = PREPROCESSED_SIZE // max
@ -166,20 +190,25 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
//_log.debug("# pad bytes: " + numPadBytes + " payloadLength: " + payloadLength + " instructions: " + instructionsLength);
for (int i = 0; i < numPadBytes; i++) {
if (false) {
target[offset] = 0x0;
int paddingRemaining = numPadBytes;
while (paddingRemaining > 0) {
byte b = (byte)(_context.random().nextInt() & 0xFF);
if (b != 0x00) {
target[offset] = b;
offset++;
} else {
// wouldn't it be nice if random could write to an array?
byte rnd = (byte)_context.random().nextInt();
if (rnd != 0x0) {
target[offset] = rnd;
offset++;
} else {
i--;
}
paddingRemaining--;
}
/*
long rnd = _context.random().nextLong();
for (long i = 0; i < 8; i++) {
byte b = (byte)(((rnd >>> i * 8l) & 0xFF));
if (b == 0x00)
continue;
target[offset] = b;
offset++;
paddingRemaining--;
}
*/
}
}
target[offset] = 0x0; // no padding here
@ -191,11 +220,12 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
}
private byte[] preprocessSubsequentFragment(TunnelGateway.Pending msg) {
ByteArray ivBuf = _ivCache.acquire();
boolean isLast = true;
byte iv[] = new byte[IV_SIZE];
byte iv[] = ivBuf.getData(); // new byte[IV_SIZE];
_context.random().nextBytes(iv);
byte target[] = new byte[PREPROCESSED_SIZE];
byte target[] = _dataCache.acquire().getData(); // new byte[PREPROCESSED_SIZE];
int instructionsLength = getInstructionsSize(msg);
int payloadLength = msg.getData().length - msg.getOffset();
@ -213,7 +243,9 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
if (isLast)
target[offset] |= 1;
_log.debug("CONTROL: " + Integer.toHexString((int)target[offset]) + "/" + Base64.encode(target, offset, 1) + " at offset " + offset);
if (_log.shouldLog(Log.DEBUG))
_log.debug("CONTROL: " + Integer.toHexString((int)target[offset]) + "/"
+ Base64.encode(target, offset, 1) + " at offset " + offset);
offset++;
@ -222,26 +254,37 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
DataHelper.toLong(target, offset, 2, payloadLength);
offset += 2;
System.arraycopy(msg.getData(), msg.getOffset(), target, offset, payloadLength);
_log.debug("fragment[" + msg.getFragmentNumber()+ "/" + offset + "/" + payloadLength + "]: " + Base64.encode(target, offset, payloadLength));
if (_log.shouldLog(Log.DEBUG))
_log.debug("subsequent fragment[" + msg.getMessageId() + "/" + msg.getFragmentNumber()+ "/"
+ offset + "/" + payloadLength + "]: "
+ Base64.encode(target, offset, payloadLength));
offset += payloadLength;
SHA256EntryCache.CacheEntry cache = _context.sha().cache().acquire(PREPROCESSED_SIZE);
// payload ready, now H(instructions+payload+IV)
System.arraycopy(iv, 0, target, offset, IV_SIZE);
Hash h = _context.sha().calculateHash(target, 0, offset + IV_SIZE);
Hash h = _context.sha().calculateHash(target, 0, offset + IV_SIZE, cache);
//Hash h = _context.sha().calculateHash(target, 0, offset + IV_SIZE);
// now shiiiiiift
int distance = PREPROCESSED_SIZE - offset;
System.arraycopy(target, 0, target, distance, offset);
_log.debug("fragments begin at " + distance + " (size=" + payloadLength + " offset=" + offset +")");
if (_log.shouldLog(Log.DEBUG))
_log.debug(msg.getMessageId() + ": fragments begin at " + distance + " (size="
+ payloadLength + " offset=" + offset +")");
offset = 0;
System.arraycopy(iv, 0, target, 0, IV_SIZE);
offset += IV_SIZE;
_ivCache.release(ivBuf);
System.arraycopy(h.getData(), 0, target, offset, 4);
offset += 4;
_context.sha().cache().release(cache);
if (isLast) {
// this is the last message, so may be smaller than the full size
int numPadBytes = PREPROCESSED_SIZE // max
@ -262,7 +305,8 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
}
}
_log.debug("# pad bytes: " + numPadBytes);
if (_log.shouldLog(Log.DEBUG))
_log.debug("# pad bytes: " + numPadBytes);
}
target[offset] = 0x0; // end of padding
offset++;

View File

@ -1,26 +1,37 @@
package net.i2p.router.tunnel;
import java.util.Date;
import java.util.Locale;
import java.text.SimpleDateFormat;
import net.i2p.data.Base64;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.router.TunnelInfo;
/**
* Coordinate the info that the tunnel creator keeps track of, including what
* peers are in the tunnel and what their configuration is
*
*/
public class TunnelCreatorConfig {
public class TunnelCreatorConfig implements TunnelInfo {
/** only necessary for client tunnels */
private Destination _destination;
private Hash _destination;
/** gateway first */
private HopConfig _config[];
/** gateway first */
private Hash _peers[];
private long _expiration;
private boolean _isInbound;
private long _messagesProcessed;
public TunnelCreatorConfig(int length, boolean isInbound) {
this(length, isInbound, null);
}
public TunnelCreatorConfig(int length, boolean isInbound, Destination destination) {
public TunnelCreatorConfig(int length, boolean isInbound, Hash destination) {
if (length <= 0)
throw new IllegalArgumentException("0 length? 0 hop tunnels are 1 length!");
_config = new HopConfig[length];
_peers = new Hash[length];
for (int i = 0; i < length; i++) {
@ -28,6 +39,7 @@ public class TunnelCreatorConfig {
}
_isInbound = isInbound;
_destination = destination;
_messagesProcessed = 0;
}
/** how many hops are there in the tunnel? */
@ -38,6 +50,18 @@ public class TunnelCreatorConfig {
* hop 0.
*/
public HopConfig getConfig(int hop) { return _config[hop]; }
/**
* retrieve the tunnelId that the given hop receives messages on.
* the gateway is hop 0.
*
*/
public TunnelId getReceiveTunnelId(int hop) { return _config[hop].getReceiveTunnel(); }
/**
* retrieve the tunnelId that the given hop sends messages on.
* the gateway is hop 0.
*
*/
public TunnelId getSendTunnelId(int hop) { return _config[hop].getSendTunnel(); }
/** retrieve the peer at the given hop. the gateway is hop 0 */
public Hash getPeer(int hop) { return _peers[hop]; }
@ -47,5 +71,55 @@ public class TunnelCreatorConfig {
public boolean isInbound() { return _isInbound; }
/** if this is a client tunnel, what destination is it for? */
public Destination getDestination() { return _destination; }
public Hash getDestination() { return _destination; }
public long getExpiration() { return _expiration; }
public void setExpiration(long when) { _expiration = when; }
public void testSuccessful(int ms) {}
/** take note of a message being pumped through this tunnel */
public void incrementProcessedMessages() { _messagesProcessed++; }
public long getProcessedMessagesCount() { return _messagesProcessed; }
public String toString() {
// H0:1235-->H1:2345-->H2:2345
StringBuffer buf = new StringBuffer(128);
if (_isInbound)
buf.append("inbound: ");
else
buf.append("outbound: ");
for (int i = 0; i < _peers.length; i++) {
buf.append(_peers[i].toBase64().substring(0,4));
buf.append(':');
if (_config[i].getReceiveTunnel() != null)
buf.append(_config[i].getReceiveTunnel());
else
buf.append('x');
buf.append('.');
if (_config[i].getSendTunnel() != null)
buf.append(_config[i].getSendTunnel());
else
buf.append('x');
if (i + 1 < _peers.length)
buf.append("...");
}
buf.append(" expiring on ").append(getExpirationString());
if (_destination != null)
buf.append(" for ").append(Base64.encode(_destination.getData(), 0, 3));
return buf.toString();
}
private static final SimpleDateFormat _fmt = new SimpleDateFormat("HH:mm:ss", Locale.UK);
private String getExpirationString() {
return format(_expiration);
}
static String format(long date) {
Date d = new Date(date);
synchronized (_fmt) {
return _fmt.format(d);
}
}
}

View File

@ -7,6 +7,7 @@ import net.i2p.I2PAppContext;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer;
@ -62,6 +63,15 @@ public class TunnelGateway {
_lastFlush = _context.clock().now();
}
/**
* Add a message to be sent down the tunnel, where we are the inbound gateway.
*
* @param msg message received to be sent through the tunnel
*/
public void add(TunnelGatewayMessage msg) {
add(msg.getMessage(), null, null);
}
/**
* Add a message to be sent down the tunnel, either sending it now (perhaps
* coallesced with other pending messages) or after a brief pause (_flushFrequency).
@ -135,7 +145,7 @@ public class TunnelGateway {
_toRouter = toRouter;
_toTunnel = toTunnel;
_messageId = message.getUniqueId();
_expiration = message.getMessageExpiration().getTime();
_expiration = message.getMessageExpiration();
_remaining = message.toByteArray();
_offset = 0;
_fragmentNumber = 0;