Big findbugs cleanup
@@ -86,7 +86,7 @@ public class I2NPMessageHandler {
 * message - if it is an unknown type or has improper formatting, etc.
 */
 public I2NPMessage readMessage(byte data[]) throws IOException, I2NPMessageException {
-int offset = readMessage(data, 0);
+readMessage(data, 0);
 return lastRead();
 }
 public int readMessage(byte data[], int offset) throws IOException, I2NPMessageException {
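The change above drops a local that was written but never read again; this is the pattern FindBugs reports as a dead local store. The call itself is kept for its side effects (the parsed message is fetched with lastRead() on the next line); only the unused offset goes. A minimal sketch of the before/after shape, with invented names rather than the real handler API:

// Minimal illustration of the dead-store pattern (names are made up for the example).
public class DeadStoreExample {
    // Pretend parser: consumes a 2-byte header and reports the new offset.
    static int parseHeader(byte[] data, int offset) {
        return offset + 2;
    }

    static void before(byte[] data) {
        int offset = parseHeader(data, 0); // 'offset' is written but never read: dead store
    }

    static void after(byte[] data) {
        parseHeader(data, 0); // call kept for its side effects, useless local removed
    }

    public static void main(String[] args) {
        byte[] data = new byte[4];
        before(data);
        after(data);
    }
}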
@@ -41,7 +41,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
 /** unsynchronized as its pretty much read only (except at startup) */
 private static final Map _builders = new HashMap(8);
-public static final void registerBuilder(Builder builder, int type) { _builders.put(new Integer(type), builder); }
+public static final void registerBuilder(Builder builder, int type) { _builders.put(Integer.valueOf(type), builder); }
 /** interface for extending the types of messages handled */
 public interface Builder {
 /** instantiate a new I2NPMessage to be populated shortly */
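Many hunks in this commit make the same substitution, so a quick aside on what it buys: Integer.valueOf() may return a cached Integer (the language guarantees a cache for values in -128..127), while new Integer() always allocates a fresh object. The snippet below is a minimal, self-contained illustration, not code from the I2P tree:

public class ValueOfExample {
    public static void main(String[] args) {
        Integer a = new Integer(42);     // old style: always allocates a new object
        Integer b = Integer.valueOf(42); // new style: may return a cached instance
        Integer c = Integer.valueOf(42);
        System.out.println(b == c);      // true: small values come from the cache
        System.out.println(a.equals(b)); // true either way; equals() is unaffected
    }
}

Since HashMap keys are compared with equals()/hashCode(), swapping the constructor for valueOf() in _builders.put(...) changes allocation behavior only, not lookups.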
@@ -103,7 +103,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
 readMessage(buffer, 0, size, type);
-long time = _context.clock().now() - start;
+//long time = _context.clock().now() - start;
 //if (time > 50)
 // _context.statManager().addRateData("i2np.readTime", time, time);
 _read = true;
@@ -148,7 +148,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
 _log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
 readMessage(data, cur, size, type);
 cur += size;
-long time = _context.clock().now() - start;
+//long time = _context.clock().now() - start;
 //if (time > 50)
 // _context.statManager().addRateData("i2np.readTime", time, time);
 _read = true;
@@ -228,7 +228,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
 off += 2;
 System.arraycopy(h.getData(), 0, buffer, off, CHECKSUM_LENGTH);
-long time = _context.clock().now() - start;
+//long time = _context.clock().now() - start;
 //if (time > 50)
 // _context.statManager().addRateData("i2np.writeTime", time, time);
@@ -370,7 +370,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
 case TunnelBuildReplyMessage.MESSAGE_TYPE:
 return new TunnelBuildReplyMessage(context);
 default:
-Builder builder = (Builder)_builders.get(new Integer(type));
+Builder builder = (Builder)_builders.get(Integer.valueOf(type));
 if (builder == null)
 return null;
 else
@@ -95,7 +95,7 @@ public class TunnelGatewayMessage extends I2NPMessageImpl {
 if (_tunnelId.getTunnelId() <= 0)
 throw new I2NPMessageException("Invalid tunnel Id " + _tunnelId);
-int size = (int)DataHelper.fromLong(data, curIndex, 2);
+DataHelper.fromLong(data, curIndex, 2);
 curIndex += 2;
 curIndex = handler.readMessage(data, curIndex);
 _msg = handler.lastRead();
@@ -209,7 +209,7 @@ public class InNetMessagePool implements Service {
 } else {
 if (_log.shouldLog(Log.WARN))
 _log.warn("Message expiring on "
-+ (messageBody != null ? (messageBody.getMessageExpiration()+"") : " [unknown]")
++ messageBody.getMessageExpiration()
 + " was not handled by a HandlerJobBuilder - DROPPING: " + messageBody,
 new Exception("f00!"));
 _context.statManager().addRateData("inNetPool.dropped", 1, 0);
@@ -369,7 +369,7 @@ public class JobQueue {
 + _queueRunners.size() + " to " + numThreads);
 for (int i = _queueRunners.size(); i < numThreads; i++) {
 JobQueueRunner runner = new JobQueueRunner(_context, i);
-_queueRunners.put(new Integer(i), runner);
+_queueRunners.put(Integer.valueOf(i), runner);
 Thread t = new I2PThread(runner);
 t.setName("JobQueue"+(_runnerId++));
 //t.setPriority(I2PThread.MAX_PRIORITY-1);
@@ -390,7 +390,7 @@ public class JobQueue {
 }
 }
-void removeRunner(int id) { _queueRunners.remove(new Integer(id)); }
+void removeRunner(int id) { _queueRunners.remove(Integer.valueOf(id)); }
 /**
 * Responsible for moving jobs from the timed queue to the ready queue,
@@ -289,7 +289,6 @@ public class Router {
 log.debug("Config file: " + filename, new Exception("location"));
 }
 Properties props = new Properties();
-FileInputStream fis = null;
 try {
 File f = new File(filename);
 if (f.canRead()) {
@@ -303,8 +302,6 @@ public class Router {
 } catch (Exception ioe) {
 if (log != null)
 log.error("Error loading the router configuration from " + filename, ioe);
-} finally {
-if (fis != null) try { fis.close(); } catch (IOException ioe) {}
 }
 return props;
 }
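If the reconstruction of the two Router hunks above is right, the dropped FileInputStream was declared and closed but never actually read from in this method (the parsing is presumably delegated to a helper), which is why FindBugs flags it as a dead store. When a method does open the stream itself, the close-in-finally idiom shown in the removed lines is still the right shape for the pre-try-with-resources Java of this era. The sketch below assumes an invented file name and helper method, not the real Router code:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

public class ConfigLoadExample {
    // Load a properties file if it exists, always closing the stream we opened.
    static Properties load(String filename) {
        Properties props = new Properties();
        FileInputStream fis = null;
        try {
            File f = new File(filename);
            if (f.canRead()) {
                fis = new FileInputStream(f);
                props.load(fis);
            }
        } catch (IOException ioe) {
            // log and fall through with whatever was loaded
        } finally {
            if (fis != null) try { fis.close(); } catch (IOException ioe) {}
        }
        return props;
    }

    public static void main(String[] args) {
        System.out.println(load("router.config").size());
    }
}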
@@ -17,7 +17,7 @@ import net.i2p.CoreVersion;
 public class RouterVersion {
 public final static String ID = "$Revision: 1.548 $ $Date: 2008-06-07 23:00:00 $";
 public final static String VERSION = "0.6.4";
-public final static long BUILD = 5;
+public final static long BUILD = 6;
 public static void main(String args[]) {
 System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
 System.out.println("Router ID: " + RouterVersion.ID);
@@ -52,8 +52,8 @@ class RouterWatchdog implements Runnable {
 private void dumpStatus() {
 if (_log.shouldLog(Log.ERROR)) {
-Job cur = _context.jobQueue().getLastJob();
+/*
+Job cur = _context.jobQueue().getLastJob();
 if (cur != null)
 _log.error("Most recent job: " + cur);
 _log.error("Last job began: "
@@ -214,9 +214,6 @@ public class StatisticsManager implements Service {
 return stats;
 }
-private void includeRate(String rateName, Properties stats) {
-includeRate(rateName, stats, null);
-}
 private void includeRate(String rateName, Properties stats, long selectedPeriods[]) {
 includeRate(rateName, stats, selectedPeriods, false);
 }
@@ -272,7 +269,6 @@ public class StatisticsManager implements Service {
 buf.append(num(rate.getLastEventCount())).append(';');
 if (numPeriods > 0) {
 double avgFrequency = rate.getLifetimeEventCount() / (double)numPeriods;
-double peakFrequency = rate.getExtremeEventCount();
 buf.append(num(avgFrequency)).append(';');
 buf.append(num(rate.getExtremeEventCount())).append(';');
 buf.append(num((double)rate.getLifetimeEventCount())).append(';');
@@ -84,9 +84,6 @@ public class SubmitMessageHistoryJob extends JobImpl {
 return;
 }
 long size = dataFile.length();
-int expectedSend = 512; // 512 for HTTP overhead
-if (size > 0)
-expectedSend += (int)size/10; // compression
 FileInputStream fin = new FileInputStream(dataFile);
 BandwidthLimitedInputStream in = new BandwidthLimitedInputStream(getContext(), fin, null, true);
 boolean sent = HTTPSendData.postData(url, size, in);
@@ -61,7 +61,6 @@ public class GarlicMessageParser {
 }
 private CloveSet readCloveSet(byte data[]) throws DataFormatException {
-Set cloves = new HashSet();
 int offset = 0;
 CloveSet set = new CloveSet();
@@ -69,7 +69,6 @@ class ExpireRoutersJob extends JobImpl {
 private Set selectKeysToExpire() {
 Set possible = getNotInUse();
 Set expiring = new HashSet(16);
-long earliestPublishDate = getContext().clock().now() - EXPIRE_DELAY;
 for (Iterator iter = possible.iterator(); iter.hasNext(); ) {
 Hash key = (Hash)iter.next();
@@ -58,7 +58,7 @@ class KBucketImpl implements KBucket {
 // we want to make sure we've got the cache in place before calling cachedXor
 _local.prepareCache();
 if (_log.shouldLog(Log.DEBUG))
-_log.debug("Local hash reset to " + (local == null ? "null" : DataHelper.toHexString(local.getData())));
+_log.debug("Local hash reset to " + DataHelper.toHexString(local.getData()));
 }
 private byte[] distanceFromLocal(Hash key) {
@@ -777,7 +777,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
 }
 if (o == null) {
-boolean removed = _kb.remove(dbEntry);
+_kb.remove(dbEntry);
 _context.peerManager().removeCapabilities(dbEntry);
 // if we dont know the key, lets make sure it isn't a now-dead peer
 }
@@ -1054,9 +1054,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
 }
 Integer val = (Integer)routerVersions.get(routerVersion);
 if (val == null)
-routerVersions.put(routerVersion, new Integer(1));
+routerVersions.put(routerVersion, Integer.valueOf(1));
 else
-routerVersions.put(routerVersion, new Integer(val.intValue() + 1));
+routerVersions.put(routerVersion, Integer.valueOf(val.intValue() + 1));
 }
 }
 }
@@ -284,7 +284,7 @@ class SearchJob extends JobImpl {
 } else if (!(ds instanceof RouterInfo)) {
 if (_log.shouldLog(Log.WARN))
 _log.warn(getJobId() + ": Error selecting closest hash that wasnt a router! "
-+ peer + " : " + (ds == null ? "null" : ds.getClass().getName()));
++ peer + " : " + ds.getClass().getName());
 _state.replyTimeout(peer);
 } else {
 RouterInfo ri = (RouterInfo)ds;
@@ -375,10 +375,10 @@ class SearchJob extends JobImpl {
 getContext().statManager().addRateData("netDb.searchMessageCount", 1, 0);
-if (_isLease || true) // always send searches out tunnels
+//if (_isLease || true) // always send searches out tunnels
 sendLeaseSearch(router);
-else
-sendRouterSearch(router);
+//else
+// sendRouterSearch(router);
 }
@@ -308,10 +308,6 @@ class StoreJob extends JobImpl {
 return;
 }
 TunnelId replyTunnelId = replyTunnel.getReceiveTunnelId(0);
-if (replyTunnel == null) {
-_log.warn("No reply inbound tunnels available!");
-return;
-}
 msg.setReplyToken(token);
 msg.setReplyTunnel(replyTunnelId);
 msg.setReplyGateway(replyTunnel.getPeer(0));
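The four removed lines are a null check that could only run after replyTunnel had already been dereferenced on the line above, so the branch was unreachable; this is what FindBugs reports as a null check of a value previously dereferenced. A stand-alone sketch of the two possible fixes, with simplified stand-in types (the FragmentHandler hunks later in this commit take the other option and move the check before the first use):

class Tunnel {
    long getReceiveTunnelId() { return 42; }
}

public class NullCheckOrderExample {
    static void before(Tunnel replyTunnel) {
        long id = replyTunnel.getReceiveTunnelId(); // throws NPE here if null...
        if (replyTunnel == null) {                  // ...so this branch can never run
            return;
        }
        System.out.println(id);
    }

    static void after(Tunnel replyTunnel) {
        if (replyTunnel == null)                    // check first, then dereference
            return;
        System.out.println(replyTunnel.getReceiveTunnelId());
    }

    public static void main(String[] args) {
        after(new Tunnel());
        after(null);
    }
}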
@@ -67,7 +67,7 @@ public class ReliabilityCalculator extends Calculator {
 val -= profile.getCommError().getRate(24*60*60*1000).getCurrentEventCount() * 1;
-long now = _context.clock().now();
+//long now = _context.clock().now();
 long timeSinceRejection = 61*60*1000; // now - profile.getTunnelHistory().getLastRejected();
 if (timeSinceRejection > 60*60*1000) {
@@ -41,7 +41,7 @@ public class CreateRouterInfoJob extends JobImpl {
 public void runJob() {
 _log.debug("Creating the new router info");
 // create a new router info and store it where LoadRouterInfoJob looks
-RouterInfo info = createRouterInfo();
+createRouterInfo();
 getContext().jobQueue().addJob(_next);
 }
@@ -63,7 +63,6 @@ public class OutboundMessageRegistry {
 public List getOriginalMessages(I2NPMessage message) {
 ArrayList matchedSelectors = null;
 ArrayList removedSelectors = null;
-long beforeSync = _context.clock().now();
 synchronized (_selectors) {
 for (int i = 0; i < _selectors.size(); i++) {
 MessageSelector sel = (MessageSelector)_selectors.get(i);
@@ -168,10 +168,10 @@ public class EstablishmentManager {
 if (_outboundStates.size() >= getMaxConcurrentEstablish()) {
 List queued = (List)_queuedOutbound.get(to);
 if (queued == null) {
-queued = new ArrayList(1);
 if (_queuedOutbound.size() > MAX_QUEUED_OUTBOUND) {
 rejected = true;
 } else {
+queued = new ArrayList(1);
 _queuedOutbound.put(to, queued);
 }
 }
@@ -336,11 +336,11 @@ public class EstablishmentManager {
 */
 PeerState receiveData(OutboundEstablishState state) {
 state.dataReceived();
-int active = 0;
-int admitted = 0;
-int remaining = 0;
+//int active = 0;
+//int admitted = 0;
+//int remaining = 0;
 synchronized (_outboundStates) {
-active = _outboundStates.size();
+//active = _outboundStates.size();
 _outboundStates.remove(state.getRemoteHostId());
 if (_queuedOutbound.size() > 0) {
 // there shouldn't have been queued messages for this active state, but just in case...
@@ -350,9 +350,9 @@ public class EstablishmentManager {
 state.addMessage((OutNetMessage)queued.get(i));
 }
-admitted = locked_admitQueued();
+//admitted = locked_admitQueued();
 }
-remaining = _queuedOutbound.size();
+//remaining = _queuedOutbound.size();
 }
 //if (admitted > 0)
 // _log.log(Log.CRIT, "Admitted " + admitted + " with " + remaining + " remaining queued and " + active + " active");
@@ -598,7 +598,6 @@ public class EstablishmentManager {
 }
 private void sendRequest(OutboundEstablishState state) {
-long now = _context.clock().now();
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("Send request to: " + state.getRemoteHostId().toString());
 UDPPacket packet = _builder.buildSessionRequestPacket(state);
@@ -703,7 +702,6 @@ public class EstablishmentManager {
 }
 private void sendConfirmation(OutboundEstablishState state) {
-long now = _context.clock().now();
 boolean valid = state.validateSessionCreated();
 if (!valid) // validate clears fields on failure
 return;
@@ -841,11 +839,11 @@ public class EstablishmentManager {
 long now = _context.clock().now();
 long nextSendTime = -1;
 OutboundEstablishState outboundState = null;
-int admitted = 0;
-int remaining = 0;
-int active = 0;
+//int admitted = 0;
+//int remaining = 0;
+//int active = 0;
 synchronized (_outboundStates) {
-active = _outboundStates.size();
+//active = _outboundStates.size();
 //if (_log.shouldLog(Log.DEBUG))
 // _log.debug("# outbound states: " + _outboundStates.size());
 for (Iterator iter = _outboundStates.values().iterator(); iter.hasNext(); ) {
@@ -891,8 +889,8 @@ public class EstablishmentManager {
 }
 }
-admitted = locked_admitQueued();
-remaining = _queuedOutbound.size();
+//admitted = locked_admitQueued();
+//remaining = _queuedOutbound.size();
 }
 //if (admitted > 0)
@@ -903,9 +903,10 @@ public class PeerState {
 /** we are resending a packet, so lets jack up the rto */
 public void messageRetransmitted(int packets) {
-long now = _context.clock().now();
-if (true || _retransmissionPeriodStart + 1000 <= now) {
+//long now = _context.clock().now();
+//if (true || _retransmissionPeriodStart + 1000 <= now) {
 _packetsRetransmitted += packets;
+/*****
 } else {
 _packetRetransmissionRate = (int)((float)(0.9f*_packetRetransmissionRate) + (float)(0.1f*_packetsRetransmitted));
 //_packetsPeriodTransmitted = _packetsTransmitted - _retransmissionPeriodStart;
@@ -913,21 +914,24 @@ public class PeerState {
 _retransmissionPeriodStart = now;
 _packetsRetransmitted = packets;
 }
+*****/
 congestionOccurred();
 _context.statManager().addRateData("udp.congestedRTO", _rto, _rttDeviation);
 adjustMTU();
 //_rto *= 2;
 }
 public void packetsTransmitted(int packets) {
-long now = _context.clock().now();
+//long now = _context.clock().now();
 _packetsTransmitted += packets;
 //_packetsPeriodTransmitted += packets;
+/*****
 if (false && _retransmissionPeriodStart + 1000 <= now) {
 _packetRetransmissionRate = (int)((float)(0.9f*_packetRetransmissionRate) + (float)(0.1f*_packetsRetransmitted));
 _retransmissionPeriodStart = 0;
 _packetsPeriodRetransmitted = (int)_packetsRetransmitted;
 _packetsRetransmitted = 0;
 }
+*****/
 }
 /** how long does it usually take to get a message ACKed? */
 public int getRTT() { return _rtt; }
@@ -34,7 +34,7 @@ public class UDPReceiver {
 public UDPReceiver(RouterContext ctx, UDPTransport transport, DatagramSocket socket, String name) {
 _context = ctx;
 _log = ctx.logManager().getLog(UDPReceiver.class);
-_id = ++_id;
+_id++;
 _name = name;
 _inboundQueue = new ArrayList(128);
 _socket = socket;
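On the UDPReceiver change just above: "_id = ++_id;" and "_id++;" do the same thing in Java, because the pre-increment already stores the new value, so assigning it back to the same field is a redundant self-assignment of the confusing kind FindBugs flags. A tiny, runnable check (the field name is reused here for illustration only):

public class SelfIncrementExample {
    private static int _id = 0;

    public static void main(String[] args) {
        _id = ++_id;              // old form: increment, then redundant self-assignment
        System.out.println(_id);  // 1
        _id++;                    // new form: same effect, stated plainly
        System.out.println(_id);  // 2
    }
}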
@@ -101,13 +101,13 @@ public class UDPSender {
 * @return number of packets queued
 */
 public int add(UDPPacket packet, int blockTime) {
-long expiration = _context.clock().now() + blockTime;
+//long expiration = _context.clock().now() + blockTime;
 int remaining = -1;
 long lifetime = -1;
 boolean added = false;
 int removed = 0;
 while ( (_keepRunning) && (remaining < 0) ) {
-try {
+//try {
 synchronized (_outboundQueue) {
 // clear out any too-old packets
 UDPPacket head = null;
@@ -123,12 +123,13 @@ public class UDPSender {
 }
 }
-if (true || (_outboundQueue.size() < MAX_QUEUED)) {
+//if (true || (_outboundQueue.size() < MAX_QUEUED)) {
 lifetime = packet.getLifetime();
 _outboundQueue.add(packet);
 added = true;
 remaining = _outboundQueue.size();
 _outboundQueue.notifyAll();
+/*****
 } else {
 long remainingTime = expiration - _context.clock().now();
 if (remainingTime > 0) {
@@ -139,8 +140,9 @@ public class UDPSender {
 }
 lifetime = packet.getLifetime();
 }
+*****/
 }
-} catch (InterruptedException ie) {}
+//} catch (InterruptedException ie) {}
 }
 _context.statManager().addRateData("udp.sendQueueSize", remaining, lifetime);
 if (!added)
@@ -23,7 +23,7 @@ import net.i2p.util.Log;
 public class BuildMessageGenerator {
 // cached, rather than creating lots of temporary Integer objects whenever we build a tunnel
 public static final Integer ORDER[] = new Integer[TunnelBuildMessage.RECORD_COUNT];
-static { for (int i = 0; i < ORDER.length; i++) ORDER[i] = new Integer(i); }
+static { for (int i = 0; i < ORDER.length; i++) ORDER[i] = Integer.valueOf(i); }
 /** return null if it is unable to find a router's public key (etc) */
 public TunnelBuildMessage createInbound(RouterContext ctx, TunnelCreatorConfig cfg) {
@@ -362,6 +362,8 @@ public class FragmentHandler {
 }
 private void receiveComplete(FragmentedMessage msg) {
+if (msg == null)
+return;
 _completed++;
 String stringified = null;
 if (_log.shouldLog(Log.DEBUG))
@@ -370,8 +372,6 @@ public class FragmentHandler {
 int fragmentCount = msg.getFragmentCount();
 // toByteArray destroys the contents of the message completely
 byte data[] = msg.toByteArray();
-if (msg == null)
-return;
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("RECV(" + data.length + "): " + Base64.encode(data)
 + " " + _context.sha().calculateHash(data).toBase64());
@@ -77,7 +77,7 @@ public class InboundEndpointProcessor {
 RouterContext ctx = null;
 if (_context instanceof RouterContext)
 ctx = (RouterContext)_context;
-if ( (ctx != null) && (_config != null) && (_config.getLength() > 0) ) {
+if ( (ctx != null) && (_config.getLength() > 0) ) {
 int rtt = 0; // dunno... may not be related to an rtt
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("Received a " + length + "byte message through tunnel " + _config);
@@ -60,13 +60,10 @@ public class PumpedTunnelGateway extends TunnelGateway {
 */
 public void add(I2NPMessage msg, Hash toRouter, TunnelId toTunnel) {
 _messagesSent++;
 long startAdd = System.currentTimeMillis();
 Pending cur = new PendingImpl(msg, toRouter, toTunnel);
-long beforeLock = System.currentTimeMillis();
-long afterAdded = -1;
 synchronized (_prequeue) {
 _prequeue.add(cur);
-afterAdded = System.currentTimeMillis();
 }
 _pumper.wantsPumping(this);
 if (_log.shouldLog(Log.DEBUG))
@@ -510,7 +510,7 @@ public class TunnelDispatcher implements Service {
 + (before-msg.getMessageExpiration()) + "ms ago? "
 + msg, new Exception("cause"));
 }
-long tid1 = (outboundTunnel != null ? outboundTunnel.getTunnelId() : -1);
+long tid1 = outboundTunnel.getTunnelId();
 long tid2 = (targetTunnel != null ? targetTunnel.getTunnelId() : -1);
 _context.messageHistory().tunnelDispatched(msg.getUniqueId(), tid1, tid2, targetPeer, "outbound gateway");
 gw.add(msg, targetPeer, targetTunnel);
@@ -245,7 +245,7 @@ class BuildHandler {
 // For each peer in the tunnel
 for (int i = 0; i < cfg.getLength(); i++) {
 Hash peer = cfg.getPeer(i);
-int record = order.indexOf(new Integer(i));
+int record = order.indexOf(Integer.valueOf(i));
 if (record < 0) {
 _log.error("Bad status index " + i);
 return;
@@ -483,7 +483,7 @@ class BuildHandler {
 int proactiveDrops = countProactiveDrops();
 long recvDelay = System.currentTimeMillis()-state.recvTime;
 if (response == 0) {
-float pDrop = recvDelay / (BuildRequestor.REQUEST_TIMEOUT*3);
+float pDrop = ((float) recvDelay) / (float) (BuildRequestor.REQUEST_TIMEOUT*3);
 pDrop = (float)Math.pow(pDrop, 16);
 if (_context.random().nextFloat() < pDrop) { // || (proactiveDrops > MAX_PROACTIVE_DROPS) ) ) {
 _context.statManager().addRateData("tunnel.rejectOverloaded", recvDelay, proactiveDrops);
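The pDrop fix above is worth spelling out: recvDelay and REQUEST_TIMEOUT*3 are both integral, so the old expression divided in integer arithmetic and truncated before it was widened to float. Casting first keeps the fraction. A self-contained sketch with the same constants (the class itself is invented for illustration):

public class IntDivisionExample {
    static final int REQUEST_TIMEOUT = 10 * 1000;

    public static void main(String[] args) {
        long recvDelay = 7500; // 7.5s of the 30s window

        float before = recvDelay / (REQUEST_TIMEOUT * 3);                   // integer division: 0.0
        float after = ((float) recvDelay) / (float) (REQUEST_TIMEOUT * 3);  // 0.25

        System.out.println(before); // 0.0
        System.out.println(after);  // 0.25
    }
}

With the old form, pDrop stayed at 0.0 until the delay exceeded the whole window and then jumped to 1.0 or more, so the probabilistic drop effectively never fired for intermediate delays.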
@@ -648,7 +648,7 @@ class BuildHandler {
 + ", waiting ids: " + ids + ", found matching tunnel? " + (cfg != null),
 null);//new Exception("source"));
 if (cfg != null) {
-BuildEndMessageState state = new BuildEndMessageState(cfg, receivedMessage, from, fromHash);
+BuildEndMessageState state = new BuildEndMessageState(cfg, receivedMessage);
 if (HANDLE_REPLIES_INLINE) {
 handleRequestAsInboundEndpoint(state);
 } else {
@@ -737,10 +737,10 @@ class BuildHandler {
 _log.debug("Receive tunnel build reply message " + receivedMessage.getUniqueId() + " from "
 + (fromHash != null ? fromHash.toBase64() : from != null ? from.calculateHash().toBase64() : "a tunnel"));
 if (HANDLE_REPLIES_INLINE) {
-handleReply(new BuildReplyMessageState(receivedMessage, from, fromHash));
+handleReply(new BuildReplyMessageState(receivedMessage));
 } else {
 synchronized (_inboundBuildReplyMessages) {
-_inboundBuildReplyMessages.add(new BuildReplyMessageState(receivedMessage, from, fromHash));
+_inboundBuildReplyMessages.add(new BuildReplyMessageState(receivedMessage));
 }
 _exec.repoll();
 }
@@ -764,13 +764,9 @@ class BuildHandler {
 /** replies for outbound tunnels that we have created */
 private class BuildReplyMessageState {
 TunnelBuildReplyMessage msg;
-RouterIdentity from;
-Hash fromHash;
 long recvTime;
-public BuildReplyMessageState(I2NPMessage m, RouterIdentity f, Hash h) {
+public BuildReplyMessageState(I2NPMessage m) {
 msg = (TunnelBuildReplyMessage)m;
-from = f;
-fromHash = h;
 recvTime = System.currentTimeMillis();
 }
 }
@@ -778,14 +774,10 @@ class BuildHandler {
 private class BuildEndMessageState {
 TunnelBuildMessage msg;
 PooledTunnelCreatorConfig cfg;
-RouterIdentity from;
-Hash fromHash;
 long recvTime;
-public BuildEndMessageState(PooledTunnelCreatorConfig c, I2NPMessage m, RouterIdentity f, Hash h) {
+public BuildEndMessageState(PooledTunnelCreatorConfig c, I2NPMessage m) {
 cfg = c;
 msg = (TunnelBuildMessage)m;
-from = f;
-fromHash = h;
 recvTime = System.currentTimeMillis();
 }
 }
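The two inner-class hunks above drop fields that were assigned in the constructor and never read, along with the parameters that fed them; this is the unread-field pattern FindBugs reports. A minimal before/after sketch with placeholder types rather than the router classes:

public class UnreadFieldExample {
    static class Before {
        final String msg;
        final String from; // assigned below but never read anywhere: an unread field
        Before(String m, String f) { msg = m; from = f; }
    }

    static class After {
        final String msg;  // only what is actually used survives
        After(String m) { msg = m; }
    }

    public static void main(String[] args) {
        System.out.println(new Before("hello", "peer").msg);
        System.out.println(new After("hello").msg);
    }
}

Dropping the unused references also stops the queued state objects from pinning the RouterIdentity and Hash instances they never consulted.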
@@ -25,7 +25,7 @@ class BuildRequestor {
 private static final List ORDER = new ArrayList(BuildMessageGenerator.ORDER.length);
 static {
 for (int i = 0; i < BuildMessageGenerator.ORDER.length; i++)
-ORDER.add(new Integer(i));
+ORDER.add(Integer.valueOf(i));
 }
 private static final int PRIORITY = 500;
 static final int REQUEST_TIMEOUT = 10*1000;
@@ -103,7 +103,6 @@ public abstract class TunnelPeerSelector {
 Log log = ctx.logManager().getLog(ClientPeerSelector.class);
 List rv = new ArrayList();
 StringTokenizer tok = new StringTokenizer(peers, ",");
-Hash h = new Hash();
 while (tok.hasMoreTokens()) {
 String peerStr = tok.nextToken();
 Hash peer = new Hash();
@@ -307,7 +306,6 @@ public abstract class TunnelPeerSelector {
 private static char[] getExcludeCaps(RouterContext ctx) {
 String excludeCaps = ctx.getProperty("router.excludePeerCaps",
 String.valueOf(Router.CAPABILITY_BW12));
-Set peers = new HashSet();
 if (excludeCaps != null) {
 char excl[] = excludeCaps.toCharArray();
 return excl;
@@ -342,7 +340,6 @@ public abstract class TunnelPeerSelector {
 String val = peer.getOption("stat_uptime");
 if (val != null) {
 long uptimeMs = 0;
-if (val != null) {
 long factor = 1;
 if (val.endsWith("ms")) {
 factor = 1;
@@ -362,10 +359,6 @@ public abstract class TunnelPeerSelector {
 }
 try { uptimeMs = Long.parseLong(val); } catch (NumberFormatException nfe) {}
 uptimeMs *= factor;
-} else {
-// not publishing an uptime, so exclude it
-return true;
-}
 long infoAge = ctx.clock().now() - peer.getPublished();
 if (infoAge < 0) {
@@ -391,7 +384,7 @@ public abstract class TunnelPeerSelector {
 }
 }
 } else {
-// not publishing stats, so exclude it
+// not publishing an uptime, so exclude it
 return true;
 }
 }
@@ -336,13 +336,11 @@ public class TunnelPool {
 public void tunnelFailed(PooledTunnelCreatorConfig cfg) {
 if (_log.shouldLog(Log.WARN))
 _log.warn(toString() + ": Tunnel failed: " + cfg);
-int remaining = 0;
 LeaseSet ls = null;
 synchronized (_tunnels) {
 _tunnels.remove(cfg);
 if (_settings.isInbound() && (_settings.getDestination() != null) )
 ls = locked_buildNewLeaseSet();
-remaining = _tunnels.size();
 if (_lastSelected == cfg) {
 _lastSelected = null;
 _lastSelectionPeriod = 0;
@@ -403,12 +401,10 @@ public class TunnelPool {
 void refreshLeaseSet() {
 if (_log.shouldLog(Log.DEBUG))
 _log.debug(toString() + ": refreshing leaseSet on tunnel expiration (but prior to grace timeout)");
-int remaining = 0;
 LeaseSet ls = null;
 if (_settings.isInbound() && (_settings.getDestination() != null) ) {
 synchronized (_tunnels) {
 ls = locked_buildNewLeaseSet();
-remaining = _tunnels.size();
 }
 if (ls != null) {
 _context.clientManager().requestLeaseSet(_settings.getDestination(), ls);