* 2005-03-29 0.5.0.5 released
2005-03-29  jrandom
    * Decreased the initial RTT estimate to 10s to allow more retries.
    * Increased the default netDb store replication factor from 2 to 6 to
      take into consideration tunnel failures.
    * Address some statistical anonymity attacks against the netDb that could
      be mounted by an active internal adversary by only answering lookups for
      leaseSets we received through an unsolicited store.
    * Don't throttle lookup responses (we throttle enough elsewhere).
    * Fix the NewsFetcher so that it doesn't incorrectly resume midway through
      the file (thanks nickster!)
    * Updated the I2PTunnel HTML (thanks postman!)
    * Added support to the I2PTunnel pages for the URL parameter "passphrase",
      which, if matched against the router.config "i2ptunnel.passphrase" value,
      skips the nonce check (a sketch of the described rule follows below). If
      the config prop doesn't exist or is blank, no passphrase is accepted.
    * Implemented HMAC-SHA256.
    * Enabled tunnel batching with a 500ms delay by default.
    * Dropped compatibility with 0.5.0.3 and earlier releases.
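The passphrase item above reduces to a single string comparison performed ahead
of the usual nonce check. A minimal sketch of that rule, assuming hypothetical
names (PassphraseGate, allowWithoutNonce, and a java.util.Properties stand-in
for router.config are illustrative, not the actual I2PTunnel web code):

    import java.util.Properties;

    // Sketch only: models the changelog's rule, not the real I2PTunnel source.
    // The "passphrase" URL parameter may skip the nonce check only when it
    // matches a non-blank "i2ptunnel.passphrase" value from router.config.
    public class PassphraseGate {
        private final Properties _config; // stand-in for router.config

        public PassphraseGate(Properties config) { _config = config; }

        /** true iff the nonce check may be skipped for this request */
        public boolean allowWithoutNonce(String passphraseParam) {
            String expected = _config.getProperty("i2ptunnel.passphrase");
            // if the config prop doesn't exist or is blank, no passphrase is accepted
            if (expected == null || expected.trim().length() == 0)
                return false;
            return expected.trim().equals(passphraseParam);
        }
    }

Treating a missing or blank property as "deny" keeps the bypass opt-in: an
operator who never sets i2ptunnel.passphrase gets the unchanged nonce behavior.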
RouterVersion.java
@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
  *
  */
 public class RouterVersion {
-    public final static String ID = "$Revision: 1.174 $ $Date: 2005/03/24 23:07:06 $";
-    public final static String VERSION = "0.5.0.4";
-    public final static long BUILD = 2;
+    public final static String ID = "$Revision: 1.175 $ $Date: 2005/03/26 02:13:38 $";
+    public final static String VERSION = "0.5.0.5";
+    public final static long BUILD = 0;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION);
         System.out.println("Router ID: " + RouterVersion.ID);

StatisticsManager.java
@@ -113,8 +113,9 @@ public class StatisticsManager implements Service {
         includeRate("tunnel.buildFailure", stats, new long[] { 60*60*1000 });
         includeRate("tunnel.buildSuccess", stats, new long[] { 60*60*1000 });
 
-        includeRate("tunnel.batchDelaySent", stats, new long[] { 10*60*1000, 60*60*1000 });
+        //includeRate("tunnel.batchDelaySent", stats, new long[] { 10*60*1000, 60*60*1000 });
+        includeRate("tunnel.batchMultipleCount", stats, new long[] { 10*60*1000, 60*60*1000 });
         includeRate("tunnel.corruptMessage", stats, new long[] { 60*60*1000l, 3*60*60*1000l });
 
         includeRate("router.throttleTunnelProbTestSlow", stats, new long[] { 60*60*1000 });
         includeRate("router.throttleTunnelProbTooFast", stats, new long[] { 60*60*1000 });

OutboundClientMessageOneShotJob.java
@@ -418,7 +418,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
         long sendTime = getContext().clock().now() - _start;
         if (_log.shouldLog(Log.WARN))
             _log.warn(getJobId() + ": Failed to send the message " + _clientMessageId + " after "
-                      + sendTime + "ms", new Exception("Message send failure"));
+                      + sendTime + "ms");
 
         long messageDelay = getContext().throttle().getMessageDelay();
         long tunnelLag = getContext().throttle().getTunnelLag();

DatabaseLookupMessageHandler.java
@@ -34,7 +34,7 @@ public class DatabaseLookupMessageHandler implements HandlerJobBuilder {
     public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
         _context.statManager().addRateData("netDb.lookupsReceived", 1, 0);
 
-        if (_context.throttle().acceptNetDbLookupRequest(((DatabaseLookupMessage)receivedMessage).getSearchKey())) {
+        if (true || _context.throttle().acceptNetDbLookupRequest(((DatabaseLookupMessage)receivedMessage).getSearchKey())) {
             return new HandleDatabaseLookupMessageJob(_context, (DatabaseLookupMessage)receivedMessage, from, fromHash);
         } else {
             if (_log.shouldLog(Log.INFO))

HandleDatabaseLookupMessageJob.java
@@ -40,6 +40,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
     private RouterIdentity _from;
     private Hash _fromHash;
     private final static int MAX_ROUTERS_RETURNED = 3;
+    private final static int CLOSENESS_THRESHOLD = 10; // StoreJob.REDUNDANCY * 2
     private final static int REPLY_TIMEOUT = 60*1000;
     private final static int MESSAGE_PRIORITY = 300;
 
@@ -48,6 +49,9 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
         _log = getContext().logManager().getLog(HandleDatabaseLookupMessageJob.class);
         getContext().statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
         getContext().statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.lookupsMatchedReceivedPublished", "How many netDb lookups did we have the data for that were published to us?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.lookupsMatchedLocalClosest", "How many netDb lookups for local data were received where we are the closest peers?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.lookupsMatchedLocalNotClosest", "How many netDb lookups for local data were received where we are NOT the closest peers?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
         _message = receivedMessage;
         _from = from;
         _fromHash = fromHash;
@@ -65,26 +69,26 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                        + " (tunnel " + _message.getReplyTunnel() + ")");
         }
 
         if (getContext().netDb().lookupRouterInfoLocally(_message.getFrom()) == null) {
             // hmm, perhaps don't always send a lookup for this...
             // but for now, wtf, why not. we may even want to adjust it so that
             // we penalize or benefit peers who send us that which we can or
             // cannot lookup
             getContext().netDb().lookupRouterInfo(_message.getFrom(), null, null, REPLY_TIMEOUT);
         }
 
         // whatdotheywant?
         handleRequest(fromKey);
     }
 
     private void handleRequest(Hash fromKey) {
         LeaseSet ls = getContext().netDb().lookupLeaseSetLocally(_message.getSearchKey());
         if (ls != null) {
             // send that lease set to the _message.getFromHash peer
             if (_log.shouldLog(Log.DEBUG))
                 _log.debug("We do have key " + _message.getSearchKey().toBase64()
                            + " locally as a lease set. sending to " + fromKey.toBase64());
-            sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
+            // only answer a request for a LeaseSet if it has been published
+            // to us, or, if its local, if we would have published to ourselves
+            if (ls.getReceivedAsPublished()) {
+                getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1, 0);
+                sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
+            } else {
+                Set routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(),
+                                                                            CLOSENESS_THRESHOLD,
+                                                                            _message.getDontIncludePeers());
+                if (getContext().clientManager().isLocal(ls.getDestination()) &&
+                    weAreClosest(routerInfoSet)) {
+                    getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1, 0);
+                    sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
+                } else {
+                    getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1, 0);
+                    sendClosest(_message.getSearchKey(), routerInfoSet, fromKey, _message.getReplyTunnel());
+                }
+            }
         } else {
             RouterInfo info = getContext().netDb().lookupRouterInfoLocally(_message.getSearchKey());
             if (info != null) {
@@ -106,6 +110,17 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
         }
     }
 
+    private boolean weAreClosest(Set routerInfoSet) {
+        boolean weAreClosest = false;
+        for (Iterator iter = routerInfoSet.iterator(); iter.hasNext(); ) {
+            RouterInfo cur = (RouterInfo)iter.next();
+            if (cur.getIdentity().calculateHash().equals(getContext().routerHash())) {
+                return true;
+            }
+        }
+        return false;
+    }
+
     private void sendData(Hash key, DataStructure data, Hash toPeer, TunnelId replyTunnel) {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Sending data matching key key " + key.toBase64() + " to peer " + toPeer.toBase64()
@@ -129,27 +144,27 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
             _log.debug("Sending closest routers to key " + key.toBase64() + ": # peers = "
                        + routerInfoSet.size() + " tunnel " + replyTunnel);
         DatabaseSearchReplyMessage msg = new DatabaseSearchReplyMessage(getContext());
-        msg.setFromHash(getContext().router().getRouterInfo().getIdentity().getHash());
+        msg.setFromHash(getContext().routerHash());
         msg.setSearchKey(key);
         for (Iterator iter = routerInfoSet.iterator(); iter.hasNext(); ) {
             RouterInfo peer = (RouterInfo)iter.next();
             msg.addReply(peer.getIdentity().getHash());
             if (msg.getNumReplies() >= MAX_ROUTERS_RETURNED)
                 break;
         }
         getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0);
         sendMessage(msg, toPeer, replyTunnel); // should this go via garlic messages instead?
     }
 
     private void sendMessage(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) {
-        Job send = null;
         if (replyTunnel != null) {
             sendThroughTunnel(message, toPeer, replyTunnel);
         } else {
             if (_log.shouldLog(Log.DEBUG))
                 _log.debug("Sending reply directly to " + toPeer);
-            send = new SendMessageDirectJob(getContext(), message, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
+            Job send = new SendMessageDirectJob(getContext(), message, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
+            getContext().netDb().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT);
         }
-
-        getContext().netDb().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT);
     }
 
     private void sendThroughTunnel(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) {
@@ -171,28 +186,6 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
         }
     }
 
-    private void sendToGateway(I2NPMessage message, Hash toPeer, TunnelId replyTunnel, TunnelInfo info) {
-        if (_log.shouldLog(Log.INFO))
-            _log.info("Want to reply to a db request via a tunnel, but we're a participant in the reply! so send it to the gateway");
-
-        if ( (toPeer == null) || (replyTunnel == null) ) {
-            if (_log.shouldLog(Log.ERROR))
-                _log.error("Someone br0ke us. where is this message supposed to go again?", getAddedBy());
-            return;
-        }
-
-        long expiration = REPLY_TIMEOUT + getContext().clock().now();
-
-        TunnelGatewayMessage msg = new TunnelGatewayMessage(getContext());
-        msg.setMessage(message);
-        msg.setTunnelId(replyTunnel);
-        msg.setMessageExpiration(expiration);
-        getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, toPeer, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
-
-        String bodyType = message.getClass().getName();
-        getContext().messageHistory().wrap(bodyType, message.getUniqueId(), TunnelGatewayMessage.class.getName(), msg.getUniqueId());
-    }
-
     public String getName() { return "Handle Database Lookup Message"; }
 
     public void dropped() {

HandleDatabaseStoreMessageJob.java
@@ -10,6 +10,7 @@ package net.i2p.router.networkdb;
 
 import java.util.Date;
 import net.i2p.data.Hash;
+import net.i2p.data.LeaseSet;
 import net.i2p.data.RouterIdentity;
 import net.i2p.data.i2np.DatabaseStoreMessage;
 import net.i2p.data.i2np.DeliveryStatusMessage;
@@ -48,8 +49,18 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
         boolean wasNew = false;
         if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
             try {
-                Object match = getContext().netDb().store(_message.getKey(), _message.getLeaseSet());
-                wasNew = (null == match);
+                LeaseSet ls = _message.getLeaseSet();
+                // mark it as something we received, so we'll answer queries
+                // for it. this flag does NOT get set on entries that we
+                // receive in response to our own lookups.
+                ls.setReceivedAsPublished(true);
+                LeaseSet match = getContext().netDb().store(_message.getKey(), _message.getLeaseSet());
+                if (match == null) {
+                    wasNew = true;
+                } else {
+                    wasNew = false;
+                    match.setReceivedAsPublished(true);
+                }
             } catch (IllegalArgumentException iae) {
                 invalidMessage = iae.getMessage();
             }

StoreJob.java
@@ -36,8 +36,8 @@ class StoreJob extends JobImpl {
     private long _expiration;
     private PeerSelector _peerSelector;
 
-    private final static int PARALLELIZATION = 1; // how many sent at a time
-    private final static int REDUNDANCY = 2; // we want the data sent to 2 peers
+    private final static int PARALLELIZATION = 2; // how many sent at a time
+    private final static int REDUNDANCY = 6; // we want the data sent to 6 peers
     /**
      * additionally send to 1 outlier(s), in case all of the routers chosen in our
      * REDUNDANCY set are attacking us by accepting DbStore messages but dropping

TCPTransport.java
@@ -87,8 +87,7 @@ public class TCPTransport extends TransportImpl {
     public static final int DEFAULT_ESTABLISHERS = 3;
 
     /** Ordered list of supported I2NP protocols */
-    public static final int[] SUPPORTED_PROTOCOLS = new int[] { 3
-                                                              , 4}; // forward compat, to drop <= 0.5.0.3
+    public static final int[] SUPPORTED_PROTOCOLS = new int[] { 4 }; // drop <= 0.5.0.3
     /** blah, people shouldnt use defaults... */
     public static final int DEFAULT_LISTEN_PORT = 8887;
 

BatchedPreprocessor.java
@@ -34,16 +34,18 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
                                           - 1 // 0x00 ending the padding
                                           - 4; // 4 byte checksum
 
+    private static final boolean DISABLE_BATCHING = false;
+
     /* not final or private so the test code can adjust */
     static long DEFAULT_DELAY = 500;
     /** wait up to 2 seconds before sending a small message */
     protected long getSendDelay() { return DEFAULT_DELAY; }
 
     public boolean preprocessQueue(List pending, TunnelGateway.Sender sender, TunnelGateway.Receiver rec) {
-        if (_log.shouldLog(Log.INFO))
-            _log.info("Preprocess queue with " + pending.size() + " to send");
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("Preprocess queue with " + pending.size() + " to send");
 
-        if (getSendDelay() <= 0) {
+        if (DISABLE_BATCHING || getSendDelay() <= 0) {
             if (_log.shouldLog(Log.INFO))
                 _log.info("No batching, send all messages immediately");
             while (pending.size() > 0) {
@@ -131,8 +133,8 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
             }
         }
 
-        if (_log.shouldLog(Log.INFO))
-            _log.info("Sent everything on the list (pending=" + pending.size() + ")");
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("Sent everything on the list (pending=" + pending.size() + ")");
 
         // sent everything from the pending list, no need to delayed flush
         return false;
@@ -150,9 +152,6 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Sending " + startAt + ":" + sendThrough + " out of " + pending.size());
         byte preprocessed[] = _dataCache.acquire().getData();
-        ByteArray ivBuf = _ivCache.acquire();
-        byte iv[] = ivBuf.getData(); // new byte[IV_SIZE];
-        _context.random().nextBytes(iv);
 
         int offset = 0;
         offset = writeFragments(pending, startAt, sendThrough, preprocessed, offset);
@@ -160,6 +159,17 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
         // so we need to format, pad, and rearrange according to the spec to
         // generate the final preprocessed data
 
+        if (offset <= 0) {
+            StringBuffer buf = new StringBuffer(128);
+            buf.append("wtf, written offset is ").append(offset);
+            buf.append(" for ").append(startAt).append(" through ").append(sendThrough);
+            for (int i = startAt; i <= sendThrough; i++) {
+                buf.append(" ").append(pending.get(i).toString());
+            }
+            _log.log(Log.CRIT, buf.toString());
+            return;
+        }
+
         preprocess(preprocessed, offset);
 
         sender.sendPreprocessed(preprocessed, rec);

BatchedRouterPreprocessor.java
@@ -17,7 +17,7 @@ public class BatchedRouterPreprocessor extends BatchedPreprocessor {
      */
     public static final String PROP_BATCH_FREQUENCY = "batchFrequency";
     public static final String PROP_ROUTER_BATCH_FREQUENCY = "router.batchFrequency";
-    public static final int DEFAULT_BATCH_FREQUENCY = 0;
+    public static final int DEFAULT_BATCH_FREQUENCY = 500;
 
     public BatchedRouterPreprocessor(RouterContext ctx) {
         this(ctx, null);

FragmentHandler.java
@@ -49,6 +49,8 @@ public class FragmentHandler {
                                           "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 });
+        _context.statManager().createRateStat("tunnel.fragmentedDropped", "How many fragments were in a partially received yet failed message?",
+                                          "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 });
         _context.statManager().createRateStat("tunnel.corruptMessage", "How many corrupted messages arrived?",
                                           "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 });
     }
 
     /**
@@ -61,9 +63,11 @@ public class FragmentHandler {
     public void receiveTunnelMessage(byte preprocessed[], int offset, int length) {
         boolean ok = verifyPreprocessed(preprocessed, offset, length);
         if (!ok) {
-            _log.error("Unable to verify preprocessed data (pre.length=" + preprocessed.length
-                       + " off=" +offset + " len=" + length, new Exception("failed"));
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Unable to verify preprocessed data (pre.length="
+                          + preprocessed.length + " off=" +offset + " len=" + length);
             _cache.release(new ByteArray(preprocessed));
+            _context.statManager().addRateData("tunnel.corruptMessage", 1, 1);
             return;
         }
         offset += HopProcessor.IV_LENGTH; // skip the IV
@@ -84,6 +88,7 @@ public class FragmentHandler {
         } catch (RuntimeException e) {
             if (_log.shouldLog(Log.ERROR))
                 _log.error("Corrupt fragment received: offset = " + offset, e);
+            _context.statManager().addRateData("tunnel.corruptMessage", 1, 1);
             throw e;
         } finally {
             // each of the FragmentedMessages populated make a copy out of the
@@ -110,8 +115,19 @@ public class FragmentHandler {
     private boolean verifyPreprocessed(byte preprocessed[], int offset, int length) {
         // now we need to verify that the message was received correctly
         int paddingEnd = HopProcessor.IV_LENGTH + 4;
-        while (preprocessed[offset+paddingEnd] != (byte)0x00)
+        while (preprocessed[offset+paddingEnd] != (byte)0x00) {
             paddingEnd++;
+            if (offset+paddingEnd >= length) {
+                if (_log.shouldLog(Log.ERROR))
+                    _log.error("Corrupt tunnel message padding");
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("cannot verify, going past the end [off="
+                              + offset + " len=" + length + " paddingEnd="
+                              + paddingEnd + " data:\n"
+                              + Base64.encode(preprocessed, offset, length));
+                return false;
+            }
+        }
         paddingEnd++; // skip the last
 
         ByteArray ba = _validateCache.acquire(); // larger than necessary, but always sufficient
@@ -129,10 +145,11 @@ public class FragmentHandler {
         boolean eq = DataHelper.eq(v.getData(), 0, preprocessed, offset + HopProcessor.IV_LENGTH, 4);
         if (!eq) {
             if (_log.shouldLog(Log.ERROR))
-                _log.error("Endpoint data doesn't match: # pad bytes: " + (paddingEnd-(HopProcessor.IV_LENGTH+4)-1));
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("nomatching endpoint: # pad bytes: " + (paddingEnd-(HopProcessor.IV_LENGTH+4)-1) + "\n"
-                           + Base64.encode(preprocessed, offset + paddingEnd, preV.length-HopProcessor.IV_LENGTH));
+                _log.error("Corrupt tunnel message - verification fails");
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("nomatching endpoint: # pad bytes: " + (paddingEnd-(HopProcessor.IV_LENGTH+4)-1) + "\n"
+                          + " offset=" + offset + " length=" + length + " paddingEnd=" + paddingEnd
+                          + Base64.encode(preprocessed, offset, length));
         }
 
         _context.sha().cache().release(cache);
@@ -380,8 +397,8 @@ public class FragmentHandler {
         if (removed && !_msg.getReleased()) {
             _failed++;
             noteFailure(_msg.getMessageId());
-            if (_log.shouldLog(Log.ERROR))
-                _log.error("Dropped failed fragmented message: " + _msg);
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Dropped failed fragmented message: " + _msg);
             _context.statManager().addRateData("tunnel.fragmentedDropped", _msg.getFragmentCount(), _msg.getLifetime());
             _msg.failed();
         } else {

TunnelGateway.java
@@ -8,6 +8,7 @@ import net.i2p.data.Hash;
 import net.i2p.data.TunnelId;
 import net.i2p.data.i2np.I2NPMessage;
 import net.i2p.data.i2np.TunnelGatewayMessage;
+import net.i2p.router.Router;
 import net.i2p.util.Log;
 import net.i2p.util.SimpleTimer;
 
@@ -87,7 +88,7 @@ public class TunnelGateway {
         _messagesSent++;
         boolean delayedFlush = false;
 
-        Pending cur = new Pending(msg, toRouter, toTunnel);
+        Pending cur = new PendingImpl(msg, toRouter, toTunnel);
         synchronized (_queue) {
             _queue.add(cur);
             delayedFlush = _preprocessor.preprocessQueue(_queue, _sender, _receiver);
@@ -96,9 +97,9 @@ public class TunnelGateway {
         // expire any as necessary, even if its framented
         for (int i = 0; i < _queue.size(); i++) {
             Pending m = (Pending)_queue.get(i);
-            if (m.getExpiration() < _lastFlush) {
+            if (m.getExpiration() + Router.CLOCK_FUDGE_FACTOR < _lastFlush) {
                 if (_log.shouldLog(Log.ERROR))
-                    _log.error("Expire on the queue: " + m);
+                    _log.error("Expire on the queue (size=" + _queue.size() + "): " + m);
                 _queue.remove(i);
                 i--;
             }
@@ -140,13 +141,13 @@ public class TunnelGateway {
     }
 
     public static class Pending {
-        private Hash _toRouter;
-        private TunnelId _toTunnel;
+        protected Hash _toRouter;
+        protected TunnelId _toTunnel;
         private long _messageId;
-        private long _expiration;
-        private byte _remaining[];
-        private int _offset;
-        private int _fragmentNumber;
+        protected long _expiration;
+        protected byte _remaining[];
+        protected int _offset;
+        protected int _fragmentNumber;
 
         public Pending(I2NPMessage message, Hash toRouter, TunnelId toTunnel) {
             _toRouter = toRouter;
@@ -174,6 +175,35 @@ public class TunnelGateway {
         /** ok, fragment sent, increment what the next will be */
         public void incrementFragmentNumber() { _fragmentNumber++; }
     }
+    private class PendingImpl extends Pending {
+        private long _created;
+
+        public PendingImpl(I2NPMessage message, Hash toRouter, TunnelId toTunnel) {
+            super(message, toRouter, toTunnel);
+            _created = _context.clock().now();
+        }
+
+        public String toString() {
+            StringBuffer buf = new StringBuffer(64);
+            buf.append("Message on ");
+            buf.append(TunnelGateway.this.toString());
+            if (_toRouter != null) {
+                buf.append(" targetting ");
+                buf.append(_toRouter.toBase64()).append(" ");
+                if (_toTunnel != null)
+                    buf.append(_toTunnel.getTunnelId());
+            }
+            long now = _context.clock().now();
+            buf.append(" actual lifetime ");
+            buf.append(now - _created).append("ms");
+            buf.append(" potential lifetime ");
+            buf.append(_expiration - _created).append("ms");
+            buf.append(" size ").append(_remaining.length);
+            buf.append(" offset ").append(_offset);
+            buf.append(" frag ").append(_fragmentNumber);
+            return buf.toString();
+        }
+    }
 
     private class DelayedFlush implements SimpleTimer.TimedEvent {
         public void timeReached() {

RequestTunnelJob.java
@@ -45,7 +45,7 @@ public class RequestTunnelJob extends JobImpl {
     private boolean _isFake;
     private boolean _isExploratory;
 
-    static final int HOP_REQUEST_TIMEOUT = 30*1000;
+    static final int HOP_REQUEST_TIMEOUT = 20*1000;
     private static final int LOOKUP_TIMEOUT = 10*1000;
 
     public RequestTunnelJob(RouterContext ctx, TunnelCreatorConfig cfg, Job onCreated, Job onFailed, int hop, boolean isFake, boolean isExploratory) {

TunnelPool.java
@@ -37,11 +37,11 @@ public class TunnelPool {
     private long _lastSelectionPeriod;
 
     /**
-     * Only 3 builds per minute per pool, even if we have failing tunnels,
+     * Only 5 builds per minute per pool, even if we have failing tunnels,
      * etc. On overflow, the necessary additional tunnels are built by the
      * RefreshJob
      */
-    private static final int MAX_BUILDS_PER_MINUTE = 3;
+    private static final int MAX_BUILDS_PER_MINUTE = 5;
 
     public TunnelPool(RouterContext ctx, TunnelPoolManager mgr, TunnelPoolSettings settings, TunnelPeerSelector sel, TunnelBuilder builder) {
         _context = ctx;