2009-04-21 sponge
* Code janitor work, basic corrections involving @Override, and applying final where it is important. Also fixed some equals methods and commented places that need fixing.
This commit is contained in:
@ -1,3 +1,8 @@
|
||||
2009-04-21 sponge
|
||||
* Code janitor work, basic corrections involving @Override, and
|
||||
applying final where it is important. Also fixed some equals methods
|
||||
and commented places that need fixing.
|
||||
|
||||
2009-04-18 Complication
|
||||
* Fix typo in "news.xml", no build number increase.
|
||||
|
||||
|
@ -12,7 +12,7 @@ import java.io.IOException;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.util.Log;
|
||||
// import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Defines a message containing arbitrary bytes of data
|
||||
@ -20,11 +20,11 @@ import net.i2p.util.Log;
|
||||
* @author jrandom
|
||||
*/
|
||||
public class DataMessage extends I2NPMessageImpl {
|
||||
private final static Log _log = new Log(DataMessage.class);
|
||||
// private final static Log _log = new Log(DataMessage.class);
|
||||
public final static int MESSAGE_TYPE = 20;
|
||||
private byte _data[];
|
||||
|
||||
private static final int MAX_SIZE = 64*1024;
|
||||
// private static final int MAX_SIZE = 64*1024; // LINT -- field hides another field, and not used
|
||||
|
||||
public DataMessage(I2PAppContext context) {
|
||||
super(context);
|
||||
@ -81,6 +81,7 @@ public class DataMessage extends I2NPMessageImpl {
|
||||
return curIndex;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void written() {
|
||||
super.written();
|
||||
_data = null;
|
||||
@ -88,10 +89,12 @@ public class DataMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getData());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DataMessage) ) {
|
||||
DataMessage msg = (DataMessage)object;
|
||||
@ -101,6 +104,7 @@ public class DataMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[DataMessage: ");
|
||||
|
@ -215,6 +215,7 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getSearchKey()) +
|
||||
DataHelper.hashCode(getFrom()) +
|
||||
@ -222,6 +223,7 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
|
||||
DataHelper.hashCode(_dontIncludePeers);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DatabaseLookupMessage) ) {
|
||||
DatabaseLookupMessage msg = (DatabaseLookupMessage)object;
|
||||
@ -234,6 +236,7 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[DatabaseLookupMessage: ");
|
||||
|
@ -110,6 +110,7 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DatabaseSearchReplyMessage) ) {
|
||||
DatabaseSearchReplyMessage msg = (DatabaseSearchReplyMessage)object;
|
||||
@ -121,12 +122,14 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getSearchKey()) +
|
||||
DataHelper.hashCode(getFromHash()) +
|
||||
DataHelper.hashCode(_peerHashes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[DatabaseSearchReplyMessage: ");
|
||||
|
@ -231,6 +231,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getKey()) +
|
||||
DataHelper.hashCode(getLeaseSet()) +
|
||||
@ -241,6 +242,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
|
||||
DataHelper.hashCode(getReplyGateway());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DatabaseStoreMessage) ) {
|
||||
DatabaseStoreMessage msg = (DatabaseStoreMessage)object;
|
||||
@ -256,6 +258,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[DatabaseStoreMessage: ");
|
||||
|
@ -53,10 +53,12 @@ public class DateMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return (int)getNow();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DateMessage) ) {
|
||||
DateMessage msg = (DateMessage)object;
|
||||
@ -66,6 +68,7 @@ public class DateMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[DateMessage: ");
|
||||
|
@ -350,6 +350,7 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
+ getAdditionalInfoSize();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof DeliveryInstructions))
|
||||
return false;
|
||||
@ -364,6 +365,7 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
DataHelper.eq(getTunnelId(), instr.getTunnelId());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return (int)getDelaySeconds() +
|
||||
getDeliveryMode() +
|
||||
@ -373,6 +375,7 @@ public class DeliveryInstructions extends DataStructureImpl {
|
||||
DataHelper.hashCode(getTunnelId());
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append("[DeliveryInstructions: ");
|
||||
|
@ -64,10 +64,12 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return (int)getMessageId() + (int)getArrival();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof DeliveryStatusMessage) ) {
|
||||
DeliveryStatusMessage msg = (DeliveryStatusMessage)object;
|
||||
@ -78,6 +80,7 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[DeliveryStatusMessage: ");
|
||||
|
@ -44,17 +44,20 @@ public class EndPointPrivateKey extends DataStructureImpl {
|
||||
_key.writeBytes(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof EndPointPublicKey))
|
||||
return false;
|
||||
return DataHelper.eq(getKey(), ((EndPointPublicKey)obj).getKey());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
if (_key == null) return 0;
|
||||
return getKey().hashCode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[EndPointPrivateKey: " + getKey() + "]";
|
||||
}
|
||||
|
@ -44,17 +44,20 @@ public class EndPointPublicKey extends DataStructureImpl {
|
||||
_key.writeBytes(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof EndPointPublicKey))
|
||||
return false;
|
||||
return DataHelper.eq(getKey(), ((EndPointPublicKey)obj).getKey());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
if (_key == null) return 0;
|
||||
return getKey().hashCode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[EndPointPublicKey: " + getKey() + "]";
|
||||
}
|
||||
|
@ -156,6 +156,7 @@ public class GarlicClove extends DataStructureImpl {
|
||||
_log.debug("Written cert: " + _certificate);
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] toByteArray() {
|
||||
byte rv[] = new byte[estimateSize()];
|
||||
int offset = 0;
|
||||
@ -186,6 +187,7 @@ public class GarlicClove extends DataStructureImpl {
|
||||
+ _certificate.size(); // certificate
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof GarlicClove))
|
||||
return false;
|
||||
@ -197,6 +199,7 @@ public class GarlicClove extends DataStructureImpl {
|
||||
DataHelper.eq(getInstructions(), clove.getInstructions());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getCertificate()) +
|
||||
(int)getCloveId() +
|
||||
@ -205,6 +208,7 @@ public class GarlicClove extends DataStructureImpl {
|
||||
DataHelper.hashCode(getInstructions());
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append("[GarlicClove: ");
|
||||
|
@ -67,15 +67,18 @@ public class GarlicMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getData());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void written() {
|
||||
super.written();
|
||||
_data = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof GarlicMessage) ) {
|
||||
GarlicMessage msg = (GarlicMessage)object;
|
||||
@ -85,6 +88,7 @@ public class GarlicMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[GarlicMessage: ");
|
||||
|
@ -187,6 +187,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
|
||||
return calculateWrittenLength()+5;
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] toByteArray() {
|
||||
byte data[] = new byte[getMessageSize()];
|
||||
int written = toByteArray(data);
|
||||
|
@ -232,6 +232,7 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public byte[] toByteArray() {
|
||||
byte rv[] = super.toByteArray();
|
||||
if (rv == null)
|
||||
@ -239,6 +240,7 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
|
||||
return rv;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getNextRouter()) +
|
||||
DataHelper.hashCode(getNextTunnelId()) +
|
||||
@ -246,6 +248,7 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
|
||||
DataHelper.hashCode(getReplyTunnel());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof TunnelCreateMessage) ) {
|
||||
TunnelCreateMessage msg = (TunnelCreateMessage)object;
|
||||
@ -258,6 +261,7 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[TunnelCreateMessage: ");
|
||||
|
@ -87,12 +87,14 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getReceiveTunnelId()) +
|
||||
getStatus() +
|
||||
(int)getNonce();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof TunnelCreateStatusMessage) ) {
|
||||
TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)object;
|
||||
@ -104,6 +106,7 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[TunnelCreateStatusMessage: ");
|
||||
|
@ -112,11 +112,13 @@ public class TunnelDataMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return (int)_tunnelId +
|
||||
DataHelper.hashCode(_data);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof TunnelDataMessage) ) {
|
||||
TunnelDataMessage msg = (TunnelDataMessage)object;
|
||||
@ -127,6 +129,7 @@ public class TunnelDataMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] toByteArray() {
|
||||
byte rv[] = super.toByteArray();
|
||||
if (rv == null)
|
||||
@ -134,6 +137,7 @@ public class TunnelDataMessage extends I2NPMessageImpl {
|
||||
return rv;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[TunnelDataMessage:");
|
||||
|
@ -90,6 +90,7 @@ public class TunnelGatewayMessage extends I2NPMessageImpl {
|
||||
I2NPMessageHandler h = new I2NPMessageHandler(_context);
|
||||
readMessage(data, offset, dataSize, type, h);
|
||||
}
|
||||
@Override
|
||||
public void readMessage(byte data[], int offset, int dataSize, int type, I2NPMessageHandler handler) throws I2NPMessageException, IOException {
|
||||
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
|
||||
int curIndex = offset;
|
||||
@ -110,11 +111,13 @@ public class TunnelGatewayMessage extends I2NPMessageImpl {
|
||||
|
||||
public int getType() { return MESSAGE_TYPE; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(getTunnelId()) +
|
||||
DataHelper.hashCode(_msg);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if ( (object != null) && (object instanceof TunnelGatewayMessage) ) {
|
||||
TunnelGatewayMessage msg = (TunnelGatewayMessage)object;
|
||||
@ -126,6 +129,7 @@ public class TunnelGatewayMessage extends I2NPMessageImpl {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("[TunnelGatewayMessage:");
|
||||
|
@ -44,17 +44,20 @@ public class TunnelSessionKey extends DataStructureImpl {
|
||||
_key.writeBytes(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof TunnelSessionKey))
|
||||
return false;
|
||||
return DataHelper.eq(getKey(), ((TunnelSessionKey)obj).getKey());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
if (_key == null) return 0;
|
||||
return getKey().hashCode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[TunnelSessionKey: " + getKey() + "]";
|
||||
}
|
||||
|
@ -45,17 +45,20 @@ public class TunnelSigningPrivateKey extends DataStructureImpl {
|
||||
_key.writeBytes(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof TunnelSigningPrivateKey))
|
||||
return false;
|
||||
return DataHelper.eq(getKey(), ((TunnelSigningPrivateKey)obj).getKey());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
if (_key == null) return 0;
|
||||
return getKey().hashCode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[EndPointPrivateKey: " + getKey() + "]";
|
||||
}
|
||||
|
@ -44,17 +44,20 @@ public class TunnelSigningPublicKey extends DataStructureImpl {
|
||||
_key.writeBytes(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof TunnelSigningPublicKey))
|
||||
return false;
|
||||
return DataHelper.eq(getKey(), ((TunnelSigningPublicKey)obj).getKey());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
if (_key == null) return 0;
|
||||
return getKey().hashCode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[TunnelSigningPublicKey: " + getKey() + "]";
|
||||
}
|
||||
|
@ -69,6 +69,7 @@ public class TunnelVerificationStructure extends DataStructureImpl {
|
||||
_authSignature.writeBytes(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || !(obj instanceof TunnelVerificationStructure))
|
||||
return false;
|
||||
@ -77,11 +78,13 @@ public class TunnelVerificationStructure extends DataStructureImpl {
|
||||
DataHelper.eq(getAuthorizationSignature(), str.getAuthorizationSignature());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
if ( (_msgHash == null) || (_authSignature == null) ) return 0;
|
||||
return getMessageHash().hashCode() + getAuthorizationSignature().hashCode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[TunnelVerificationStructure: " + getMessageHash() + " " + getAuthorizationSignature() + "]";
|
||||
}
|
||||
|
@ -13,14 +13,12 @@ import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.*;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.Base64;
|
||||
import net.i2p.data.DataHelper;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.RouterAddress;
|
||||
import net.i2p.data.RouterInfo;
|
||||
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
|
||||
import net.i2p.util.HexDump;
|
||||
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
@ -55,22 +53,22 @@ public class Blocklist {
|
||||
private RouterContext _context;
|
||||
private long _blocklist[];
|
||||
private int _blocklistSize;
|
||||
private Object _lock;
|
||||
private final Object _lock = new Object();
|
||||
private Entry _wrapSave;
|
||||
private Set _inProcess;
|
||||
private Map _peerBlocklist;
|
||||
private Set _singleIPBlocklist;
|
||||
private final Set _inProcess = new HashSet(0);
|
||||
private Map _peerBlocklist = new HashMap(0);
|
||||
private final Set _singleIPBlocklist = new HashSet(0);
|
||||
|
||||
public Blocklist(RouterContext context) {
|
||||
_context = context;
|
||||
_log = context.logManager().getLog(Blocklist.class);
|
||||
_blocklist = null;
|
||||
_blocklistSize = 0;
|
||||
_lock = new Object();
|
||||
// _lock = new Object();
|
||||
_wrapSave = null;
|
||||
_inProcess = new HashSet(0);
|
||||
_peerBlocklist = new HashMap(0);
|
||||
_singleIPBlocklist = new HashSet(0);
|
||||
// _inProcess = new HashSet(0);
|
||||
// _peerBlocklist = new HashMap(0);
|
||||
// _singleIPBlocklist = new HashSet(0);
|
||||
}
|
||||
|
||||
public Blocklist() {
|
||||
|
@ -44,6 +44,7 @@ public class ClientTunnelSettings {
|
||||
_outboundSettings.writeToProperties("outbound.", props);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
Properties p = new Properties();
|
||||
|
@ -35,9 +35,9 @@ public class InNetMessagePool implements Service {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private HandlerJobBuilder _handlerJobBuilders[];
|
||||
private List _pendingDataMessages;
|
||||
private List _pendingDataMessagesFrom;
|
||||
private List _pendingGatewayMessages;
|
||||
private final List _pendingDataMessages;
|
||||
private final List _pendingDataMessagesFrom;
|
||||
private final List _pendingGatewayMessages;
|
||||
private SharedShortCircuitDataJob _shortCircuitDataJob;
|
||||
private SharedShortCircuitGatewayJob _shortCircuitGatewayJob;
|
||||
private boolean _alive;
|
||||
|
@ -33,6 +33,7 @@ public abstract class JobImpl implements Job {
|
||||
|
||||
public final RouterContext getContext() { return _context; }
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append(super.toString());
|
||||
|
@ -33,7 +33,7 @@ public class JobQueue {
|
||||
private RouterContext _context;
|
||||
|
||||
/** Integer (runnerId) to JobQueueRunner for created runners */
|
||||
private HashMap _queueRunners;
|
||||
private final HashMap _queueRunners;
|
||||
/** a counter to identify a job runner */
|
||||
private volatile static int _runnerId = 0;
|
||||
/** list of jobs that are ready to run ASAP */
|
||||
@ -41,7 +41,7 @@ public class JobQueue {
|
||||
/** list of jobs that are scheduled for running in the future */
|
||||
private ArrayList _timedJobs;
|
||||
/** job name to JobStat for that job */
|
||||
private SortedMap _jobStats;
|
||||
private final SortedMap _jobStats;
|
||||
/** how many job queue runners can go concurrently */
|
||||
private int _maxRunners = 1;
|
||||
private QueuePumper _pumper;
|
||||
@ -50,7 +50,7 @@ public class JobQueue {
|
||||
/** have we been killed or are we alive? */
|
||||
private boolean _alive;
|
||||
|
||||
private Object _jobLock;
|
||||
private final Object _jobLock;
|
||||
|
||||
/** default max # job queue runners operating */
|
||||
private final static int DEFAULT_MAX_RUNNERS = 1;
|
||||
@ -94,7 +94,7 @@ public class JobQueue {
|
||||
* queue runners wait on this whenever they're not doing anything, and
|
||||
* this gets notified *once* whenever there are ready jobs
|
||||
*/
|
||||
private Object _runnerLock = new Object();
|
||||
private final Object _runnerLock = new Object();
|
||||
|
||||
public JobQueue(RouterContext context) {
|
||||
_context = context;
|
||||
|
@ -59,7 +59,9 @@ class JobStats {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() { return _job.hashCode(); }
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj != null) && (obj instanceof JobStats) ) {
|
||||
JobStats stats = (JobStats)obj;
|
||||
@ -73,6 +75,7 @@ class JobStats {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("Over ").append(getRuns()).append(" runs, job <b>").append(getName()).append("</b> took ");
|
||||
|
@ -41,7 +41,7 @@ public class KeyManager {
|
||||
private PublicKey _publicKey;
|
||||
private SigningPrivateKey _signingPrivateKey;
|
||||
private SigningPublicKey _signingPublicKey;
|
||||
private Map _leaseSetKeys; // Destination --> LeaseSetKeys
|
||||
private final Map _leaseSetKeys; // Destination --> LeaseSetKeys
|
||||
private SynchronizeKeysJob _synchronizeJob;
|
||||
|
||||
public final static String PROP_KEYDIR = "router.keyBackupDir";
|
||||
|
@ -73,6 +73,7 @@ public class LeaseSetKeys extends DataStructureImpl {
|
||||
_revocationKey.writeBytes(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int rv = 0;
|
||||
rv += DataHelper.hashCode(_dest);
|
||||
@ -81,6 +82,7 @@ public class LeaseSetKeys extends DataStructureImpl {
|
||||
return rv;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj != null) && (obj instanceof LeaseSetKeys) ) {
|
||||
LeaseSetKeys keys = (LeaseSetKeys)obj;
|
||||
|
@ -26,7 +26,7 @@ import net.i2p.util.Log;
|
||||
public class MessageHistory {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private List _unwrittenEntries; // list of raw entries (strings) yet to be written
|
||||
private final List _unwrittenEntries = new ArrayList(64); // list of raw entries (strings) yet to be written
|
||||
private String _historyFile; // where to write
|
||||
private String _localIdent; // placed in each entry to uniquely identify the local router
|
||||
private boolean _doLog; // true == we want to log
|
||||
@ -104,7 +104,7 @@ public class MessageHistory {
|
||||
_doLog = DEFAULT_KEEP_MESSAGE_HISTORY;
|
||||
_historyFile = filename;
|
||||
_localIdent = getName(_context.routerHash());
|
||||
_unwrittenEntries = new ArrayList(64);
|
||||
// _unwrittenEntries = new ArrayList(64);
|
||||
updateSettings();
|
||||
// clear the history file on startup
|
||||
if (_firstPass) {
|
||||
|
@ -58,6 +58,7 @@ public class MultiRouter {
|
||||
_defaultContext.clock().setOffset(0);
|
||||
|
||||
Runtime.getRuntime().addShutdownHook(new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
Thread.currentThread().setName("Router* Shutdown");
|
||||
try { Thread.sleep(120*1000); } catch (InterruptedException ie) {}
|
||||
|
@ -10,10 +10,6 @@ package net.i2p.router;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
|
@ -302,6 +302,7 @@ public class OutNetMessage {
|
||||
super.finalize();
|
||||
}
|
||||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append("[OutNetMessage contains ");
|
||||
@ -365,6 +366,7 @@ public class OutNetMessage {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int rv = 0;
|
||||
rv += DataHelper.hashCode(_message);
|
||||
@ -373,7 +375,10 @@ public class OutNetMessage {
|
||||
return rv;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if(obj == null) return false;
|
||||
if(obj.getClass() != OutNetMessage.class) return false;
|
||||
return obj == this; // two OutNetMessages are different even if they contain the same message
|
||||
}
|
||||
}
|
||||
|
@ -8,7 +8,6 @@ package net.i2p.router;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.Writer;
|
||||
import java.util.List;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
|
@ -4,16 +4,12 @@ import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import net.i2p.data.Base64;
|
||||
import net.i2p.data.DataFormatException;
|
||||
import net.i2p.data.Destination;
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.LeaseSet;
|
||||
import net.i2p.data.SessionKey;
|
||||
import net.i2p.router.TunnelPoolSettings;
|
||||
import net.i2p.util.KeyRing;
|
||||
|
||||
/**
|
||||
@ -31,6 +27,7 @@ public class PersistentKeyRing extends KeyRing {
|
||||
addFromProperties();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SessionKey put(Hash h, SessionKey sk) {
|
||||
SessionKey old = super.put(h, sk);
|
||||
if (!sk.equals(old)) {
|
||||
@ -67,6 +64,7 @@ public class PersistentKeyRing extends KeyRing {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
StringBuffer buf = new StringBuffer(1024);
|
||||
buf.append("\n<table border=\"1\"><tr><th align=\"left\">Destination Hash<th align=\"left\">Name or Dest.<th align=\"left\">Session Key</tr>");
|
||||
|
@ -9,7 +9,6 @@ package net.i2p.router;
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
@ -53,7 +52,7 @@ import net.i2p.util.SimpleTimer;
|
||||
public class Router {
|
||||
private Log _log;
|
||||
private RouterContext _context;
|
||||
private Properties _config;
|
||||
private final Properties _config;
|
||||
private String _configFilename;
|
||||
private RouterInfo _routerInfo;
|
||||
private long _started;
|
||||
@ -64,7 +63,7 @@ public class Router {
|
||||
private int _gracefulExitCode;
|
||||
private I2PThread.OOMEventListener _oomListener;
|
||||
private ShutdownHook _shutdownHook;
|
||||
private I2PThread _gracefulShutdownDetector;
|
||||
private final I2PThread _gracefulShutdownDetector;
|
||||
|
||||
public final static String PROP_CONFIG_FILE = "router.configLocation";
|
||||
|
||||
@ -1350,6 +1349,7 @@ private static class ShutdownHook extends Thread {
|
||||
_context = ctx;
|
||||
_id = ++__id;
|
||||
}
|
||||
@Override
|
||||
public void run() {
|
||||
setName("Router " + _id + " shutdown");
|
||||
Log l = _context.logManager().getLog(Router.class);
|
||||
|
@ -15,11 +15,11 @@ import net.i2p.util.Log;
|
||||
*/
|
||||
public class RouterClock extends Clock {
|
||||
|
||||
RouterContext _context;
|
||||
RouterContext _contextRC; // LINT field hides another field
|
||||
|
||||
public RouterClock(RouterContext context) {
|
||||
super(context);
|
||||
_context = context;
|
||||
_contextRC = context;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -27,6 +27,7 @@ public class RouterClock extends Clock {
|
||||
* value means that we are slow, while a negative value means we are fast.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
public void setOffset(long offsetMs, boolean force) {
|
||||
|
||||
if (false) return;
|
||||
@ -53,10 +54,10 @@ public class RouterClock extends Clock {
|
||||
}
|
||||
|
||||
// If so configured, check sanity of proposed clock offset
|
||||
if (Boolean.valueOf(_context.getProperty("router.clockOffsetSanityCheck","true")).booleanValue() == true) {
|
||||
if (Boolean.valueOf(_contextRC.getProperty("router.clockOffsetSanityCheck","true")).booleanValue() == true) {
|
||||
|
||||
// Try calculating peer clock skew
|
||||
Long peerClockSkew = _context.commSystem().getFramedAveragePeerClockSkew(50);
|
||||
Long peerClockSkew = _contextRC.commSystem().getFramedAveragePeerClockSkew(50);
|
||||
|
||||
if (peerClockSkew != null) {
|
||||
|
||||
@ -88,9 +89,9 @@ public class RouterClock extends Clock {
|
||||
getLog().info("Updating clock offset to " + offsetMs + "ms from " + _offset + "ms");
|
||||
|
||||
if (!_statCreated)
|
||||
_context.statManager().createRateStat("clock.skew", "How far is the already adjusted clock being skewed?", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
|
||||
_contextRC.statManager().createRateStat("clock.skew", "How far is the already adjusted clock being skewed?", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
|
||||
_statCreated = true;
|
||||
_context.statManager().addRateData("clock.skew", delta, 0);
|
||||
_contextRC.statManager().addRateData("clock.skew", delta, 0);
|
||||
} else {
|
||||
getLog().log(Log.INFO, "Initializing clock offset to " + offsetMs + "ms from " + _offset + "ms");
|
||||
}
|
||||
|
@ -58,7 +58,7 @@ public class RouterContext extends I2PAppContext {
|
||||
private MessageValidator _messageValidator;
|
||||
private MessageStateMonitor _messageStateMonitor;
|
||||
private RouterThrottle _throttle;
|
||||
private RouterClock _clock;
|
||||
private RouterClock _clockX; // LINT field hides another field, hope rename won't break anything.
|
||||
private Calculator _integrationCalc;
|
||||
private Calculator _speedCalc;
|
||||
private Calculator _capacityCalc;
|
||||
@ -262,6 +262,7 @@ public class RouterContext extends I2PAppContext {
|
||||
/** how do we rank the capacity of profiles? */
|
||||
public Calculator capacityCalculator() { return _capacityCalc; }
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(512);
|
||||
buf.append("RouterContext: ").append(super.toString()).append('\n');
|
||||
@ -294,6 +295,7 @@ public class RouterContext extends I2PAppContext {
|
||||
* I2PAppContext says.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
public String getProperty(String propName) {
|
||||
if (_router != null) {
|
||||
String val = _router.getConfigSetting(propName);
|
||||
@ -306,6 +308,7 @@ public class RouterContext extends I2PAppContext {
|
||||
* I2PAppContext says.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
public String getProperty(String propName, String defaultVal) {
|
||||
if (_router != null) {
|
||||
String val = _router.getConfigSetting(propName);
|
||||
@ -317,6 +320,7 @@ public class RouterContext extends I2PAppContext {
|
||||
/**
|
||||
* Return an int with an int default
|
||||
*/
|
||||
@Override
|
||||
public int getProperty(String propName, int defaultVal) {
|
||||
if (_router != null) {
|
||||
String val = _router.getConfigSetting(propName);
|
||||
@ -339,14 +343,16 @@ public class RouterContext extends I2PAppContext {
|
||||
* that it triggers initializeClock() of which we definitely
|
||||
* need the local version to run.
|
||||
*/
|
||||
@Override
|
||||
public Clock clock() {
|
||||
if (!_clockInitialized) initializeClock();
|
||||
return _clock;
|
||||
return _clockX;
|
||||
}
|
||||
@Override
|
||||
protected void initializeClock() {
|
||||
synchronized (this) {
|
||||
if (_clock == null)
|
||||
_clock = new RouterClock(this);
|
||||
if (_clockX == null)
|
||||
_clockX = new RouterClock(this);
|
||||
_clockInitialized = true;
|
||||
}
|
||||
}
|
||||
|
@ -19,6 +19,7 @@ class RouterDoSThrottle extends RouterThrottleImpl {
|
||||
private static final long LOOKUP_THROTTLE_PERIOD = 10*1000;
|
||||
private static final long LOOKUP_THROTTLE_MAX = 20;
|
||||
|
||||
@Override
|
||||
public boolean acceptNetDbLookupRequest(Hash key) {
|
||||
// if we were going to refuse it anyway, drop it
|
||||
boolean shouldAccept = super.acceptNetDbLookupRequest(key);
|
||||
|
@ -17,7 +17,7 @@ import net.i2p.CoreVersion;
|
||||
public class RouterVersion {
|
||||
public final static String ID = "$Revision: 1.548 $ $Date: 2008-06-07 23:00:00 $";
|
||||
public final static String VERSION = CoreVersion.VERSION;
|
||||
public final static long BUILD = 0;
|
||||
public final static long BUILD = 1;
|
||||
public static void main(String args[]) {
|
||||
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
|
||||
System.out.println("Router ID: " + RouterVersion.ID);
|
||||
|
@ -13,8 +13,6 @@ import java.io.Writer;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -8,8 +8,6 @@ package net.i2p.router;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.Destination;
|
||||
|
@ -185,6 +185,7 @@ public class TunnelPoolSettings {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
Properties p = new Properties();
|
||||
|
@ -105,6 +105,7 @@ public class TunnelSettings extends DataStructureImpl {
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int rv = 0;
|
||||
rv += _includeDummy ? 100 : 0;
|
||||
@ -118,6 +119,7 @@ public class TunnelSettings extends DataStructureImpl {
|
||||
return rv;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj != null) && (obj instanceof TunnelSettings) ) {
|
||||
TunnelSettings settings = (TunnelSettings)obj;
|
||||
|
@ -13,7 +13,6 @@ import java.io.OutputStream;
|
||||
import java.net.Socket;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
@ -73,7 +72,7 @@ public class ClientConnectionRunner {
|
||||
* This contains the last 10 MessageIds that have had their (non-ack) status
|
||||
* delivered to the client (so that we can be sure only to update when necessary)
|
||||
*/
|
||||
private List _alreadyProcessed;
|
||||
private final List _alreadyProcessed;
|
||||
private ClientWriterRunner _writer;
|
||||
private Hash _destHashCache;
|
||||
/** are we, uh, dead */
|
||||
@ -111,7 +110,7 @@ public class ClientConnectionRunner {
|
||||
t.setDaemon(true);
|
||||
t.setPriority(I2PThread.MAX_PRIORITY);
|
||||
t.start();
|
||||
_out = _socket.getOutputStream();
|
||||
_out = _socket.getOutputStream(); // LINT -- OWCH! needs a better way so it can be final.
|
||||
_reader.startReading();
|
||||
} catch (IOException ioe) {
|
||||
_log.error("Error starting up the runner", ioe);
|
||||
@ -412,7 +411,7 @@ public class ClientConnectionRunner {
|
||||
}
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("after writeMessage("+ msg.getClass().getName() + "): "
|
||||
+ (_context.clock().now()-before) + "ms");;
|
||||
+ (_context.clock().now()-before) + "ms");
|
||||
} catch (I2CPMessageException ime) {
|
||||
_log.error("Message exception sending I2CP message: " + ime);
|
||||
stopRunning();
|
||||
@ -464,7 +463,7 @@ public class ClientConnectionRunner {
|
||||
// this *should* be mod 65536, but UnsignedInteger is still b0rked. FIXME
|
||||
private final static int MAX_MESSAGE_ID = 32767;
|
||||
private static volatile int _messageId = RandomSource.getInstance().nextInt(MAX_MESSAGE_ID); // messageId counter
|
||||
private static Object _messageIdLock = new Object();
|
||||
private final static Object _messageIdLock = new Object();
|
||||
|
||||
static int getNextMessageId() {
|
||||
synchronized (_messageIdLock) {
|
||||
|
@ -41,8 +41,8 @@ import net.i2p.util.Log;
|
||||
public class ClientManager {
|
||||
private Log _log;
|
||||
private ClientListenerRunner _listener;
|
||||
private HashMap _runners; // Destination --> ClientConnectionRunner
|
||||
private Set _pendingRunners; // ClientConnectionRunner for clients w/out a Dest yet
|
||||
private final HashMap _runners; // Destination --> ClientConnectionRunner
|
||||
private final Set _pendingRunners; // ClientConnectionRunner for clients w/out a Dest yet
|
||||
private RouterContext _ctx;
|
||||
|
||||
/** ms to wait before rechecking for inbound messages to deliver to clients */
|
||||
|
@ -74,9 +74,11 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
startup();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlive() { return _manager != null && _manager.isAlive(); }
|
||||
|
||||
private static final long MAX_TIME_TO_REBUILD = 10*60*1000;
|
||||
@Override
|
||||
public boolean verifyClientLiveliness() {
|
||||
if (_manager == null) return true;
|
||||
boolean lively = true;
|
||||
@ -167,6 +169,7 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean shouldPublishLeaseSet(Hash destinationHash) { return _manager.shouldPublishLeaseSet(destinationHash); }
|
||||
|
||||
public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) {
|
||||
@ -196,6 +199,7 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void renderStatusHTML(Writer out) throws IOException {
|
||||
if (_manager != null)
|
||||
_manager.renderStatusHTML(out);
|
||||
@ -206,6 +210,7 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
|
||||
*
|
||||
* @return set of Destination objects
|
||||
*/
|
||||
@Override
|
||||
public Set listClients() {
|
||||
if (_manager != null)
|
||||
return _manager.listClients();
|
||||
|
@ -42,6 +42,7 @@ public class CloveSet {
|
||||
public long getExpiration() { return _expiration; }
|
||||
public void setExpiration(long expiration) { _expiration = expiration; }
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append("{");
|
||||
|
@ -156,6 +156,7 @@ public class GarlicConfig {
|
||||
|
||||
protected String getSubData() { return ""; }
|
||||
private final static String NL = System.getProperty("line.separator");
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("<garlicConfig>").append(NL);
|
||||
|
@ -9,8 +9,6 @@ package net.i2p.router.message;
|
||||
*/
|
||||
|
||||
import java.util.Date;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.Certificate;
|
||||
import net.i2p.data.DataFormatException;
|
||||
|
@ -106,6 +106,7 @@ public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageRece
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void dropped() {
|
||||
getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
|
||||
_message.getClass().getName(),
|
||||
|
@ -20,7 +20,7 @@ class OutboundClientMessageStatus {
|
||||
private ClientMessage _msg;
|
||||
private PayloadGarlicConfig _clove;
|
||||
private LeaseSet _leaseSet;
|
||||
private Set _sent;
|
||||
private final Set _sent;
|
||||
private int _numLookups;
|
||||
private boolean _success;
|
||||
private boolean _failure;
|
||||
@ -114,6 +114,7 @@ class OutboundClientMessageStatus {
|
||||
public Hash getGateway() { return _gateway; }
|
||||
public TunnelId getTunnel() { return _tunnel; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int rv = 0;
|
||||
if (_gateway != null)
|
||||
@ -123,6 +124,7 @@ class OutboundClientMessageStatus {
|
||||
return rv;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (o == null) return false;
|
||||
if (o.getClass() != Tunnel.class) return false;
|
||||
|
@ -33,6 +33,7 @@ public class PayloadGarlicConfig extends GarlicConfig {
|
||||
}
|
||||
public I2NPMessage getPayload() { return _payload; }
|
||||
|
||||
@Override
|
||||
protected String getSubData() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("<payloadMessage>").append(_payload).append("</payloadMessage>");
|
||||
|
@ -262,6 +262,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
|
||||
|
||||
public String getName() { return "Handle Database Lookup Message"; }
|
||||
|
||||
@Override
|
||||
public void dropped() {
|
||||
getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
|
||||
_message.getClass().getName(),
|
||||
|
@ -132,6 +132,7 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
|
||||
|
||||
public String getName() { return "Handle Database Store Message"; }
|
||||
|
||||
@Override
|
||||
public void dropped() {
|
||||
getContext().messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
|
||||
}
|
||||
|
@ -11,7 +11,6 @@ package net.i2p.router.networkdb.kademlia;
|
||||
import java.util.List;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.RouterInfo;
|
||||
import net.i2p.data.TunnelId;
|
||||
import net.i2p.data.i2np.DatabaseLookupMessage;
|
||||
import net.i2p.router.RouterContext;
|
||||
@ -41,11 +40,11 @@ class ExploreJob extends SearchJob {
|
||||
/** only send the closest "dont tell me about" refs...
|
||||
* Override to make this bigger because we want to include both the
|
||||
* floodfills and the previously-queried peers */
|
||||
static final int MAX_CLOSEST = 20;
|
||||
static final int MAX_CLOSEST = 20; // LINT -- field hides another field, this isn't an override.
|
||||
|
||||
/** Override to make this shorter, since we don't sort out the
|
||||
* unresponsive ff peers like we do in FloodOnlySearchJob */
|
||||
static final int PER_FLOODFILL_PEER_TIMEOUT = 5*1000;
|
||||
static final int PER_FLOODFILL_PEER_TIMEOUT = 5*1000; // LINT -- field hides another field, this isn't an override.
|
||||
|
||||
/**
|
||||
* Create a new search for the routingKey specified
|
||||
@ -78,6 +77,7 @@ class ExploreJob extends SearchJob {
|
||||
* @param replyGateway gateway for the reply tunnel
|
||||
* @param expiration when the search should stop
|
||||
*/
|
||||
@Override
|
||||
protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, Hash replyGateway, long expiration) {
|
||||
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
|
||||
msg.setSearchKey(getState().getTarget());
|
||||
@ -112,11 +112,13 @@ class ExploreJob extends SearchJob {
|
||||
* replies sent back to us directly). This uses the similar overrides as the other buildMessage above.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
protected DatabaseLookupMessage buildMessage(long expiration) {
|
||||
return buildMessage(null, getContext().router().getRouterInfo().getIdentity().getHash(), expiration);
|
||||
}
|
||||
|
||||
/** max # of concurrent searches */
|
||||
@Override
|
||||
protected int getBredth() { return EXPLORE_BREDTH; }
|
||||
|
||||
|
||||
@ -125,6 +127,7 @@ class ExploreJob extends SearchJob {
|
||||
* number of peers that we didn't know about before.
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
protected void newPeersFound(int numNewPeers) {
|
||||
// who cares about how many new peers. well, maybe we do. but for now,
|
||||
// we'll do the simplest thing that could possibly work.
|
||||
@ -139,5 +142,6 @@ class ExploreJob extends SearchJob {
|
||||
*
|
||||
*/
|
||||
|
||||
@Override
|
||||
public String getName() { return "Kademlia NetDb Explore"; }
|
||||
}
|
||||
|
@ -3,7 +3,6 @@ package net.i2p.router.networkdb.kademlia;
|
||||
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
|
||||
import net.i2p.data.i2np.DatabaseStoreMessage;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.router.JobImpl;
|
||||
import net.i2p.router.MessageSelector;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.util.Log;
|
||||
|
@ -8,11 +8,7 @@ import java.util.List;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.i2np.DatabaseLookupMessage;
|
||||
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
|
||||
import net.i2p.data.i2np.DatabaseStoreMessage;
|
||||
import net.i2p.data.i2np.I2NPMessage;
|
||||
import net.i2p.router.Job;
|
||||
import net.i2p.router.JobImpl;
|
||||
import net.i2p.router.MessageSelector;
|
||||
import net.i2p.router.OutNetMessage;
|
||||
import net.i2p.router.ReplyJob;
|
||||
@ -44,8 +40,8 @@ class FloodOnlySearchJob extends FloodSearchJob {
|
||||
protected Log _log;
|
||||
private FloodfillNetworkDatabaseFacade _facade;
|
||||
protected Hash _key;
|
||||
private List _onFind;
|
||||
private List _onFailed;
|
||||
private final List _onFind;
|
||||
private final List _onFailed;
|
||||
private long _expiration;
|
||||
protected int _timeoutMs;
|
||||
private long _origExpiration;
|
||||
@ -54,9 +50,9 @@ class FloodOnlySearchJob extends FloodSearchJob {
|
||||
private volatile boolean _dead;
|
||||
private long _created;
|
||||
private boolean _shouldProcessDSRM;
|
||||
private HashSet _unheardFrom;
|
||||
private final HashSet _unheardFrom;
|
||||
|
||||
protected List _out;
|
||||
protected final List _out;
|
||||
protected MessageSelector _replySelector;
|
||||
protected ReplyJob _onReply;
|
||||
protected Job _onTimeout;
|
||||
@ -83,6 +79,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
|
||||
_created = System.currentTimeMillis();
|
||||
_shouldProcessDSRM = false;
|
||||
}
|
||||
@Override
|
||||
void addDeferred(Job onFind, Job onFailed, long timeoutMs, boolean isLease) {
|
||||
if (_dead) {
|
||||
getContext().jobQueue().addJob(onFailed);
|
||||
@ -91,10 +88,12 @@ class FloodOnlySearchJob extends FloodSearchJob {
|
||||
if (onFailed != null) synchronized (_onFailed) { _onFailed.add(onFailed); }
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public long getExpiration() { return _expiration; }
|
||||
public long getCreated() { return _created; }
|
||||
public boolean shouldProcessDSRM() { return _shouldProcessDSRM; }
|
||||
private static final int CONCURRENT_SEARCHES = 2;
|
||||
@Override
|
||||
public void runJob() {
|
||||
// pick some floodfill peers and send out the searches
|
||||
List floodfillPeers = _facade.getFloodfillPeers();
|
||||
@ -184,10 +183,14 @@ class FloodOnlySearchJob extends FloodSearchJob {
|
||||
failed();
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public String getName() { return "NetDb flood search (phase 1)"; }
|
||||
|
||||
@Override
|
||||
Hash getKey() { return _key; }
|
||||
@Override
|
||||
void decrementRemaining() { if (_lookupsRemaining > 0) _lookupsRemaining--; }
|
||||
@Override
|
||||
int getLookupsRemaining() { return _lookupsRemaining; }
|
||||
/** Note that we heard from the peer */
|
||||
void decrementRemaining(Hash peer) {
|
||||
@ -197,6 +200,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
void failed() {
|
||||
synchronized (this) {
|
||||
if (_dead) return;
|
||||
@ -224,6 +228,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
|
||||
}
|
||||
}
|
||||
}
|
||||
@Override
|
||||
void success() {
|
||||
synchronized (this) {
|
||||
if (_dead) return;
|
||||
|
@ -31,7 +31,7 @@ import net.i2p.util.Log;
|
||||
*/
|
||||
public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
|
||||
public static final char CAPACITY_FLOODFILL = 'f';
|
||||
private Map _activeFloodQueries;
|
||||
private final Map _activeFloodQueries;
|
||||
private boolean _floodfillEnabled;
|
||||
/** for testing, see isFloodfill() below */
|
||||
private static String _alwaysQuery;
|
||||
@ -55,11 +55,13 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
|
||||
_context.statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
|
||||
}
|
||||
|
||||
@Override
|
||||
public void startup() {
|
||||
super.startup();
|
||||
_context.jobQueue().addJob(new FloodfillMonitorJob(_context, this));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void createHandlers() {
|
||||
_context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context));
|
||||
_context.inNetMessagePool().registerHandlerJobBuilder(DatabaseStoreMessage.MESSAGE_TYPE, new FloodfillDatabaseStoreMessageHandler(_context, this));
|
||||
@ -70,6 +72,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
|
||||
/**
|
||||
* @throws IllegalArgumentException if the local router info is invalid
|
||||
*/
|
||||
@Override
|
||||
public void publish(RouterInfo localRouterInfo) throws IllegalArgumentException {
|
||||
if (localRouterInfo == null) throw new IllegalArgumentException("wtf, null localRouterInfo?");
|
||||
if (_context.router().isHidden()) return; // DE-nied!
|
||||
@ -77,6 +80,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
|
||||
sendStore(localRouterInfo.getIdentity().calculateHash(), localRouterInfo, null, null, PUBLISH_TIMEOUT, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void sendStore(Hash key, DataStructure ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
|
||||
// if we are a part of the floodfill netDb, don't send out our own leaseSets as part
|
||||
// of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
|
||||
@ -131,6 +135,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
|
||||
private static final int FLOOD_PRIORITY = 200;
|
||||
private static final int FLOOD_TIMEOUT = 30*1000;
|
||||
|
||||
@Override
|
||||
protected PeerSelector createPeerSelector() { return new FloodfillPeerSelector(_context); }
|
||||
|
||||
public void setFloodfillEnabled(boolean yes) { _floodfillEnabled = yes; }
|
||||
@ -183,6 +188,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
|
||||
* without any match)
|
||||
*
|
||||
*/
|
||||
@Override
|
||||
SearchJob search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
|
||||
//if (true) return super.search(key, onFindJob, onFailedLookupJob, timeoutMs, isLease);
|
||||
if (key == null) throw new IllegalArgumentException("searchin for nothin, eh?");
|
||||
@ -282,6 +288,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
|
||||
* Search for a newer router info, drop it from the db if the search fails,
|
||||
* unless just started up or have bigger problems.
|
||||
*/
|
||||
@Override
|
||||
protected void lookupBeforeDropping(Hash peer, RouterInfo info) {
|
||||
// following are some special situations, we don't want to
|
||||
// drop the peer in these cases
|
||||
@ -356,8 +363,8 @@ class FloodSearchJob extends JobImpl {
|
||||
private Log _log;
|
||||
private FloodfillNetworkDatabaseFacade _facade;
|
||||
private Hash _key;
|
||||
private List _onFind;
|
||||
private List _onFailed;
|
||||
private final List _onFind;
|
||||
private final List _onFailed;
|
||||
private long _expiration;
|
||||
private int _timeoutMs;
|
||||
private long _origExpiration;
|
||||
|
@ -31,10 +31,12 @@ class FloodfillPeerSelector extends PeerSelector {
|
||||
*
|
||||
* @return List of Hash for the peers selected
|
||||
*/
|
||||
@Override
|
||||
public List selectMostReliablePeers(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {
|
||||
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List selectNearestExplicitThin(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {
|
||||
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, false);
|
||||
}
|
||||
|
@ -34,17 +34,21 @@ class FloodfillStoreJob extends StoreJob {
|
||||
_facade = facade;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int getParallelization() { return 1; }
|
||||
@Override
|
||||
protected int getRedundancy() { return 1; }
|
||||
|
||||
/**
|
||||
* Send was totally successful
|
||||
*/
|
||||
@Override
|
||||
protected void succeed() {
|
||||
super.succeed();
|
||||
if (_state != null)
|
||||
getContext().jobQueue().addJob(new FloodfillVerifyStoreJob(getContext(), _state.getTarget(), _facade));
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() { return "Floodfill netDb store"; }
|
||||
}
|
@ -30,6 +30,7 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLooku
|
||||
super(ctx, receivedMessage, from, fromHash);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean answerAllQueries() {
|
||||
if (!FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext())) return false;
|
||||
return FloodfillNetworkDatabaseFacade.isFloodfill(getContext().router().getRouterInfo());
|
||||
@ -40,6 +41,7 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLooku
|
||||
* This gets the word out to routers that we are no longer floodfill, so they
|
||||
* will stop bugging us.
|
||||
*/
|
||||
@Override
|
||||
protected void sendClosest(Hash key, Set routerInfoSet, Hash toPeer, TunnelId replyTunnel) {
|
||||
super.sendClosest(key, routerInfoSet, toPeer, replyTunnel);
|
||||
|
||||
|
@ -184,6 +184,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
|
||||
|
||||
public String getName() { return "Handle Database Store Message"; }
|
||||
|
||||
@Override
|
||||
public void dropped() {
|
||||
getContext().messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ import net.i2p.util.RandomSource;
|
||||
class KBucketImpl implements KBucket {
|
||||
private Log _log;
|
||||
/** set of Hash objects for the peers in the kbucket */
|
||||
private List _entries;
|
||||
private final List _entries;
|
||||
/** we center the kbucket set on the given hash, and derive distances from this */
|
||||
private Hash _local;
|
||||
/** include if any bits equal or higher to this bit (in big endian order) */
|
||||
@ -328,6 +328,7 @@ class KBucketImpl implements KBucket {
|
||||
return BigInteger.ZERO.setBit(_begin);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(1024);
|
||||
buf.append("KBucketImpl: ");
|
||||
|
@ -138,6 +138,7 @@ class KBucketSet {
|
||||
_log.info(toString());
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
BigInteger us = new BigInteger(1, _us.getData());
|
||||
StringBuffer buf = new StringBuffer(1024);
|
||||
|
@ -53,7 +53,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
private DataStore _ds; // hash to DataStructure mapping, persisted when necessary
|
||||
/** where the data store is pushing the data */
|
||||
private String _dbDir;
|
||||
private Set _exploreKeys; // set of Hash objects that we should search on (to fill up a bucket, not to get data)
|
||||
private final Set _exploreKeys = new HashSet(64); // set of Hash objects that we should search on (to fill up a bucket, not to get data)
|
||||
private boolean _initialized;
|
||||
/** Clock independent time of when we started up */
|
||||
private long _started;
|
||||
@ -69,7 +69,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
* removed when the job decides to stop running.
|
||||
*
|
||||
*/
|
||||
private Map _publishingLeaseSets;
|
||||
private final Map _publishingLeaseSets;
|
||||
|
||||
/**
|
||||
* Hash of the key currently being searched for, pointing the SearchJob that
|
||||
@ -77,7 +77,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
* added on to the list of jobs fired on success/failure
|
||||
*
|
||||
*/
|
||||
private Map _activeRequests;
|
||||
private final Map _activeRequests;
|
||||
|
||||
/**
|
||||
* The search for the given key is no longer active
|
||||
@ -176,7 +176,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
_initialized = false;
|
||||
_kb = null;
|
||||
_ds = null;
|
||||
_exploreKeys = null;
|
||||
_exploreKeys.clear(); // hope this doesn't cause an explosion, it shouldn't.
|
||||
// _exploreKeys = null;
|
||||
}
|
||||
|
||||
public void restart() {
|
||||
@ -218,7 +219,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
_kb = new KBucketSet(_context, ri.getIdentity().getHash());
|
||||
_ds = new PersistentDataStore(_context, dbDir, this);
|
||||
//_ds = new TransientDataStore();
|
||||
_exploreKeys = new HashSet(64);
|
||||
// _exploreKeys = new HashSet(64);
|
||||
_dbDir = dbDir;
|
||||
|
||||
createHandlers();
|
||||
@ -331,6 +332,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
return rv;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getKnownRouters() {
|
||||
if (_kb == null) return 0;
|
||||
CountRouters count = new CountRouters();
|
||||
@ -349,11 +351,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getKnownLeaseSets() {
|
||||
if (_ds == null) return 0;
|
||||
return _ds.countLeaseSets();
|
||||
}
|
||||
|
||||
|
||||
/* aparently, not used?? should be public if used elsewhere. */
|
||||
private class CountLeaseSets implements SelectionCollector {
|
||||
private int _count;
|
||||
public int size() { return _count; }
|
||||
@ -364,7 +368,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
_count++;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* This is fast and doesn't use synchronization,
|
||||
* but it includes both routerinfos and leasesets.
|
||||
@ -868,6 +872,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void renderRouterInfoHTML(Writer out, String routerPrefix) throws IOException {
|
||||
StringBuffer buf = new StringBuffer(4*1024);
|
||||
buf.append("<h2>Network Database RouterInfo Lookup</h2>\n");
|
||||
@ -895,6 +900,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
||||
renderStatusHTML(out, true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void renderStatusHTML(Writer out, boolean full) throws IOException {
|
||||
int size = getKnownRouters() * 512;
|
||||
if (full)
|
||||
|
@ -42,7 +42,7 @@ public class PeerSelector {
|
||||
*
|
||||
* @return ordered list of Hash objects
|
||||
*/
|
||||
public List selectMostReliablePeers(Hash key, int numClosest, Set alreadyChecked, KBucketSet kbuckets) {
|
||||
public List selectMostReliablePeers(Hash key, int numClosest, Set alreadyChecked, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API
|
||||
// get the peers closest to the key
|
||||
List nearest = selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets);
|
||||
return nearest;
|
||||
@ -55,7 +55,7 @@ public class PeerSelector {
|
||||
*
|
||||
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
|
||||
*/
|
||||
public List selectNearestExplicit(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {
|
||||
public List selectNearestExplicit(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API
|
||||
if (true)
|
||||
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets);
|
||||
|
||||
@ -91,7 +91,7 @@ public class PeerSelector {
|
||||
*
|
||||
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
|
||||
*/
|
||||
public List selectNearestExplicitThin(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {
|
||||
public List selectNearestExplicitThin(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API
|
||||
if (peersToIgnore == null)
|
||||
peersToIgnore = new HashSet(1);
|
||||
peersToIgnore.add(_context.router().getRouterInfo().getIdentity().getHash());
|
||||
@ -195,7 +195,7 @@ public class PeerSelector {
|
||||
*
|
||||
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
|
||||
*/
|
||||
public List selectNearest(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {
|
||||
public List selectNearest(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API
|
||||
// sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia
|
||||
// would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm.
|
||||
// later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance()
|
||||
|
@ -56,15 +56,18 @@ class PersistentDataStore extends TransientDataStore {
|
||||
writer.start();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void restart() {
|
||||
_dbDir = _facade.getDbDir();
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataStructure remove(Hash key) {
|
||||
_context.jobQueue().addJob(new RemoveJob(key));
|
||||
return super.remove(key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void put(Hash key, DataStructure data) {
|
||||
if ( (data == null) || (key == null) ) return;
|
||||
super.put(key, data);
|
||||
@ -105,7 +108,7 @@ class PersistentDataStore extends TransientDataStore {
|
||||
* Queue up writes, write up to 600 files every 10 minutes
|
||||
*/
|
||||
private class Writer implements Runnable {
|
||||
private Map _keys;
|
||||
private final Map _keys;
|
||||
private List _keyOrder;
|
||||
public Writer() {
|
||||
_keys = new HashMap(64);
|
||||
|
@ -23,7 +23,7 @@ import net.i2p.util.Log;
|
||||
public class RepublishLeaseSetJob extends JobImpl {
|
||||
private Log _log;
|
||||
private final static long REPUBLISH_LEASESET_DELAY = 5*60*1000;
|
||||
public final static long REPUBLISH_LEASESET_TIMEOUT = 60*1000;
|
||||
public final /* static */ long REPUBLISH_LEASESET_TIMEOUT = 60*1000;
|
||||
private Hash _dest;
|
||||
private KademliaNetworkDatabaseFacade _facade;
|
||||
/** this is actually last attempted publish */
|
||||
|
@ -47,7 +47,7 @@ class SearchJob extends JobImpl {
|
||||
private boolean _isLease;
|
||||
private Job _pendingRequeueJob;
|
||||
private PeerSelector _peerSelector;
|
||||
private List _deferredSearches;
|
||||
private final List _deferredSearches;
|
||||
private boolean _deferredCleared;
|
||||
private long _startedOn;
|
||||
private boolean _floodfillPeersExhausted;
|
||||
@ -780,6 +780,7 @@ class SearchJob extends JobImpl {
|
||||
|
||||
public String getName() { return "Kademlia NetDb Search"; }
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return super.toString() + " started "
|
||||
+ DataHelper.formatDuration((getContext().clock().now() - _startedOn)) + " ago";
|
||||
|
@ -36,6 +36,7 @@ class SearchMessageSelector implements MessageSelector {
|
||||
_log.debug("[" + _id + "] Created: " + toString());
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Search selector [" + _id + "] looking for a reply from " + _peer
|
||||
+ " with regards to " + _state.getTarget();
|
||||
|
@ -17,12 +17,12 @@ import net.i2p.router.RouterContext;
|
||||
*/
|
||||
class SearchState {
|
||||
private RouterContext _context;
|
||||
private HashSet _pendingPeers;
|
||||
private final HashSet _pendingPeers;
|
||||
private HashMap _pendingPeerTimes;
|
||||
private HashSet _attemptedPeers;
|
||||
private HashSet _failedPeers;
|
||||
private HashSet _successfulPeers;
|
||||
private HashSet _repliedPeers;
|
||||
private final HashSet _attemptedPeers;
|
||||
private final HashSet _failedPeers;
|
||||
private final HashSet _successfulPeers;
|
||||
private final HashSet _repliedPeers;
|
||||
private Hash _searchKey;
|
||||
private volatile long _completed;
|
||||
private volatile long _started;
|
||||
@ -166,6 +166,7 @@ class SearchState {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(256);
|
||||
buf.append("Searching for ").append(_searchKey);
|
||||
|
@ -2,7 +2,6 @@ package net.i2p.router.networkdb.kademlia;
|
||||
|
||||
import net.i2p.data.Hash;
|
||||
import net.i2p.data.i2np.DatabaseLookupMessage;
|
||||
import net.i2p.router.JobImpl;
|
||||
import net.i2p.router.RouterContext;
|
||||
import net.i2p.router.OutNetMessage;
|
||||
import net.i2p.router.TunnelInfo;
|
||||
@ -23,8 +22,11 @@ class SingleSearchJob extends FloodOnlySearchJob {
|
||||
super(ctx, null, key, null, null, 5*1000, false);
|
||||
_to = to;
|
||||
}
|
||||
@Override
|
||||
public String getName() { return "NetDb search key from DSRM"; }
|
||||
@Override
|
||||
public boolean shouldProcessDSRM() { return false; } // don't loop
|
||||
@Override
|
||||
public void runJob() {
|
||||
_onm = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout, _timeoutMs);
|
||||
DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
|
||||
@ -44,8 +46,10 @@ class SingleSearchJob extends FloodOnlySearchJob {
|
||||
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), _to);
|
||||
_lookupsRemaining = 1;
|
||||
}
|
||||
@Override
|
||||
void failed() {
|
||||
getContext().messageRegistry().unregisterPending(_onm);
|
||||
}
|
||||
@Override
|
||||
void success() {}
|
||||
}
|
||||
|
@ -55,6 +55,7 @@ class StoreMessageSelector implements MessageSelector {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer rv = new StringBuffer(64);
|
||||
rv.append("Waiting for netDb confirm from ").append(_peer.toBase64()).append(", found? ");
|
||||
|
@ -15,12 +15,12 @@ class StoreState {
|
||||
private RouterContext _context;
|
||||
private Hash _key;
|
||||
private DataStructure _data;
|
||||
private HashSet _pendingPeers;
|
||||
private final HashSet _pendingPeers;
|
||||
private HashMap _pendingPeerTimes;
|
||||
private HashSet _successfulPeers;
|
||||
private HashSet _successfulExploratoryPeers;
|
||||
private HashSet _failedPeers;
|
||||
private HashSet _attemptedPeers;
|
||||
private final HashSet _successfulPeers;
|
||||
private final HashSet _successfulExploratoryPeers;
|
||||
private final HashSet _failedPeers;
|
||||
private final HashSet _attemptedPeers;
|
||||
private int _completeCount;
|
||||
private volatile long _completed;
|
||||
private volatile long _started;
|
||||
@ -147,6 +147,7 @@ class StoreState {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(256);
|
||||
buf.append("Storing ").append(_key);
|
||||
|
@ -10,9 +10,7 @@ package net.i2p.router.networkdb.kademlia;
|
||||
|
||||
import java.util.Date;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
@ -119,14 +117,17 @@ class TransientDataStore implements DataStore {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return DataHelper.hashCode(_data);
|
||||
}
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if ( (obj == null) || (obj.getClass() != getClass()) ) return false;
|
||||
TransientDataStore ds = (TransientDataStore)obj;
|
||||
return DataHelper.eq(ds._data, _data);
|
||||
}
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer();
|
||||
buf.append("Transient DataStore: ").append(_data.size()).append("\nKeys: ");
|
||||
|
@ -44,6 +44,7 @@ public class CapacityCalculator extends Calculator {
|
||||
/** the calculator estimates over a 1 hour period */
|
||||
private static long ESTIMATE_PERIOD = 60*60*1000;
|
||||
|
||||
@Override
|
||||
public double calc(PeerProfile profile) {
|
||||
RateStat acceptStat = profile.getTunnelCreateResponseTime();
|
||||
RateStat rejectStat = profile.getTunnelHistory().getRejectionRate();
|
||||
|
@ -17,6 +17,7 @@ public class IntegrationCalculator extends Calculator {
|
||||
_log = context.logManager().getLog(IntegrationCalculator.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public double calc(PeerProfile profile) {
|
||||
// give more weight to recent counts
|
||||
long val = profile.getDbIntroduction().getRate(24*60*60*1000l).getCurrentEventCount();
|
||||
|
@ -37,7 +37,7 @@ class PeerManager {
|
||||
private ProfileOrganizer _organizer;
|
||||
private ProfilePersistenceHelper _persistenceHelper;
|
||||
private List _peersByCapability[];
|
||||
private Map _capabilitiesByPeer;
|
||||
private final Map _capabilitiesByPeer;
|
||||
|
||||
public PeerManager(RouterContext context) {
|
||||
_context = context;
|
||||
|
@ -445,7 +445,9 @@ public class PeerProfile {
|
||||
private boolean calculateIsFailing() { return false; }
|
||||
void setIsFailing(boolean val) { _isFailing = val; }
|
||||
|
||||
@Override
|
||||
public int hashCode() { return (_peer == null ? 0 : _peer.hashCode()); }
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null) return false;
|
||||
if (obj.getClass() != PeerProfile.class) return false;
|
||||
@ -453,6 +455,7 @@ public class PeerProfile {
|
||||
PeerProfile prof = (PeerProfile)obj;
|
||||
return _peer.equals(prof.getPeer());
|
||||
}
|
||||
@Override
|
||||
public String toString() { return "Profile: " + getPeer().toBase64(); }
|
||||
|
||||
/**
|
||||
|
@ -50,7 +50,7 @@ public class PeerTestJob extends JobImpl {
|
||||
/** number of peers to test each round */
|
||||
private int getTestConcurrency() { return 1; }
|
||||
|
||||
public void startTesting(PeerManager manager) {
|
||||
public void startTesting(PeerManager manager) { // LINT -- Exporting non-public type through public API
|
||||
_manager = manager;
|
||||
_keepTesting = true;
|
||||
getContext().jobQueue().addJob(this);
|
||||
@ -222,6 +222,7 @@ public class PeerTestJob extends JobImpl {
|
||||
return false;
|
||||
}
|
||||
public boolean matchFound() { return _matchFound; }
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(64);
|
||||
buf.append("Test peer ").append(_peer.toBase64().substring(0,4));
|
||||
|
@ -84,7 +84,7 @@ public class ProfileOrganizer {
|
||||
public static final int DEFAULT_MINIMUM_HIGH_CAPACITY_PEERS = 10;
|
||||
|
||||
/** synchronized against this lock when updating the tier that peers are located in (and when fetching them from a peer) */
|
||||
private Object _reorganizeLock = new Object();
|
||||
private final Object _reorganizeLock = new Object();
|
||||
|
||||
/** incredibly weak PRNG, just used for shuffling peers. no need to waste the real PRNG on this */
|
||||
private Random _random = new Random();
|
||||
|
@ -3,10 +3,8 @@ package net.i2p.router.peermanager;
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.text.DecimalFormat;
|
||||
import java.text.DecimalFormatSymbols;
|
||||
import java.util.Comparator;
|
||||
import java.util.Iterator;
|
||||
import java.util.Locale;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
|
||||
|
@ -18,6 +18,7 @@ public class SpeedCalculator extends Calculator {
|
||||
public SpeedCalculator(RouterContext context) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public double calc(PeerProfile profile) {
|
||||
// measures 1 minute throughput of individual tunnels
|
||||
double d = (profile.getPeakTunnel1mThroughputKBps()*1024d) + profile.getSpeedBonus();
|
||||
|
@ -41,6 +41,7 @@ public class BandwidthLimitedInputStream extends FilterInputStream {
|
||||
_log = context.logManager().getLog(BandwidthLimitedInputStream.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read() throws IOException {
|
||||
if (_pullFromOutbound)
|
||||
_currentRequest = _context.bandwidthLimiter().requestOutbound(1, _peerSource);
|
||||
@ -56,10 +57,12 @@ public class BandwidthLimitedInputStream extends FilterInputStream {
|
||||
return in.read();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read(byte dest[]) throws IOException {
|
||||
return read(dest, 0, dest.length);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int read(byte dest[], int off, int len) throws IOException {
|
||||
int read = in.read(dest, off, len);
|
||||
if (read == -1) return -1;
|
||||
@ -84,6 +87,7 @@ public class BandwidthLimitedInputStream extends FilterInputStream {
|
||||
}
|
||||
return read;
|
||||
}
|
||||
@Override
|
||||
public long skip(long numBytes) throws IOException {
|
||||
long skip = in.skip(numBytes);
|
||||
|
||||
@ -105,6 +109,7 @@ public class BandwidthLimitedInputStream extends FilterInputStream {
|
||||
return skip;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
synchronized (this) {
|
||||
if (_currentRequest != null)
|
||||
|
@ -37,6 +37,7 @@ public class BandwidthLimitedOutputStream extends FilterOutputStream {
|
||||
|
||||
public FIFOBandwidthLimiter.Request getCurrentRequest() { return _currentRequest; }
|
||||
|
||||
@Override
|
||||
public void write(int val) throws IOException {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Writing a single byte!", new Exception("Single byte from..."));
|
||||
@ -49,9 +50,11 @@ public class BandwidthLimitedOutputStream extends FilterOutputStream {
|
||||
_log.warn("Waiting to write a byte took too long [" + waited + "ms");
|
||||
out.write(val);
|
||||
}
|
||||
@Override
|
||||
public void write(byte src[]) throws IOException {
|
||||
write(src, 0, src.length);
|
||||
}
|
||||
@Override
|
||||
public void write(byte src[], int off, int len) throws IOException {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Writing " + len + " bytes");
|
||||
@ -83,6 +86,7 @@ public class BandwidthLimitedOutputStream extends FilterOutputStream {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
synchronized (this) {
|
||||
if (_currentRequest != null)
|
||||
|
@ -58,14 +58,18 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
|
||||
_manager.restart();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int countActivePeers() { return (_manager == null ? 0 : _manager.countActivePeers()); }
|
||||
@Override
|
||||
public int countActiveSendPeers() { return (_manager == null ? 0 : _manager.countActiveSendPeers()); }
|
||||
@Override
|
||||
public boolean haveCapacity() { return (_manager == null ? false : _manager.haveCapacity()); }
|
||||
|
||||
/**
|
||||
* Framed average clock skew of connected peers in seconds, or null if we cannot answer.
|
||||
* Average is calculated over the middle "percentToInclude" peers.
|
||||
*/
|
||||
@Override
|
||||
public Long getFramedAveragePeerClockSkew(int percentToInclude) {
|
||||
if (_manager == null) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
@ -121,14 +125,17 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
|
||||
GetBidsJob.getBids(_context, this, msg);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isBacklogged(Hash dest) {
|
||||
return _manager.isBacklogged(dest);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isEstablished(Hash dest) {
|
||||
return _manager.isEstablished(dest);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean wasUnreachable(Hash dest) {
|
||||
return _manager.wasUnreachable(dest);
|
||||
}
|
||||
@ -137,21 +144,26 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
|
||||
return _manager.getIP(dest);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List getMostRecentErrorMessages() {
|
||||
return _manager.getMostRecentErrorMessages();
|
||||
}
|
||||
|
||||
@Override
|
||||
public short getReachabilityStatus() {
|
||||
if (_manager == null) return CommSystemFacade.STATUS_UNKNOWN;
|
||||
if (_context.router().isHidden()) return CommSystemFacade.STATUS_OK;
|
||||
return _manager.getReachabilityStatus();
|
||||
}
|
||||
@Override
|
||||
public void recheckReachability() { _manager.recheckReachability(); }
|
||||
|
||||
@Override
|
||||
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException {
|
||||
_manager.renderStatusHTML(out, urlBase, sortFlags);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set createAddresses() {
|
||||
Map addresses = null;
|
||||
boolean newCreated = false;
|
||||
@ -225,6 +237,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
|
||||
/**
|
||||
* UDP changed addresses, tell NTCP and restart
|
||||
*/
|
||||
@Override
|
||||
public void notifyReplaceAddress(RouterAddress UDPAddr) {
|
||||
if (UDPAddr == null)
|
||||
return;
|
||||
|
@ -12,8 +12,8 @@ import net.i2p.util.Log;
|
||||
public class FIFOBandwidthLimiter {
|
||||
private Log _log;
|
||||
private I2PAppContext _context;
|
||||
private List _pendingInboundRequests;
|
||||
private List _pendingOutboundRequests;
|
||||
private final List _pendingInboundRequests;
|
||||
private final List _pendingOutboundRequests;
|
||||
/** how many bytes we can consume for inbound transmission immediately */
|
||||
private volatile int _availableInbound;
|
||||
/** how many bytes we can consume for outbound transmission immediately */
|
||||
@ -54,7 +54,7 @@ public class FIFOBandwidthLimiter {
|
||||
|
||||
private static int __id = 0;
|
||||
|
||||
static long now() {
|
||||
public /* static */ long now() {
|
||||
// dont use the clock().now(), since that may jump
|
||||
return System.currentTimeMillis();
|
||||
}
|
||||
@ -769,6 +769,7 @@ public class FIFOBandwidthLimiter {
|
||||
}
|
||||
public void attach(Object obj) { _attachment = obj; }
|
||||
public Object attachment() { return _attachment; }
|
||||
@Override
|
||||
public String toString() { return getRequestName(); }
|
||||
}
|
||||
|
||||
|
@ -30,11 +30,11 @@ import net.i2p.util.SimpleTimer;
|
||||
public class OutboundMessageRegistry {
|
||||
private Log _log;
|
||||
/** list of currently active MessageSelector instances */
|
||||
private List _selectors;
|
||||
private final List _selectors;
|
||||
/** map of active MessageSelector to either an OutNetMessage or a List of OutNetMessages causing it (for quick removal) */
|
||||
private Map _selectorToMessage;
|
||||
private final Map _selectorToMessage;
|
||||
/** set of active OutNetMessage (for quick removal and selector fetching) */
|
||||
private Set _activeMessages;
|
||||
private final Set _activeMessages;
|
||||
private CleanupTask _cleanupTask;
|
||||
private RouterContext _context;
|
||||
|
||||
|
@ -33,11 +33,11 @@ public class EventPumper implements Runnable {
|
||||
private Log _log;
|
||||
private boolean _alive;
|
||||
private Selector _selector;
|
||||
private List _bufCache;
|
||||
private List _wantsRead;
|
||||
private List _wantsWrite;
|
||||
private List _wantsRegister;
|
||||
private List _wantsConRegister;
|
||||
private final List _bufCache;
|
||||
private final List _wantsRead = new ArrayList(16);
|
||||
private final List _wantsWrite = new ArrayList(4);
|
||||
private final List _wantsRegister = new ArrayList(1);
|
||||
private final List _wantsConRegister = new ArrayList(4);
|
||||
private NTCPTransport _transport;
|
||||
private long _expireIdleWriteTime;
|
||||
|
||||
@ -66,10 +66,10 @@ public class EventPumper implements Runnable {
|
||||
public void startPumping() {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Starting pumper");
|
||||
_wantsRead = new ArrayList(16);
|
||||
_wantsWrite = new ArrayList(4);
|
||||
_wantsRegister = new ArrayList(1);
|
||||
_wantsConRegister = new ArrayList(4);
|
||||
// _wantsRead = new ArrayList(16);
|
||||
// _wantsWrite = new ArrayList(4);
|
||||
// _wantsRegister = new ArrayList(1);
|
||||
// _wantsConRegister = new ArrayList(4);
|
||||
try {
|
||||
_selector = Selector.open();
|
||||
_alive = true;
|
||||
|
@ -124,8 +124,10 @@ public class NTCPAddress {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() { return _host + ":" + _port; }
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int rv = 0;
|
||||
rv += _port;
|
||||
@ -136,6 +138,7 @@ public class NTCPAddress {
|
||||
return rv;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object val) {
|
||||
if ( (val != null) && (val instanceof NTCPAddress) ) {
|
||||
NTCPAddress addr = (NTCPAddress)val;
|
||||
|
@ -56,13 +56,13 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
|
||||
private SocketChannel _chan;
|
||||
private SelectionKey _conKey;
|
||||
/** list of ByteBuffer containing data we have read and are ready to process, oldest first */
|
||||
private List _readBufs;
|
||||
private final List _readBufs;
|
||||
/**
|
||||
* list of ByteBuffers containing fully populated and encrypted data, ready to write,
|
||||
* and already cleared through the bandwidth limiter.
|
||||
*/
|
||||
private List _writeBufs;
|
||||
private List _bwRequests;
|
||||
private final List _writeBufs;
|
||||
private final List _bwRequests;
|
||||
private boolean _established;
|
||||
private long _establishedOn;
|
||||
private EstablishState _establishState;
|
||||
@ -75,7 +75,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
|
||||
/**
|
||||
* pending unprepared OutNetMessage instances
|
||||
*/
|
||||
private List _outbound;
|
||||
private final List _outbound;
|
||||
/** current prepared OutNetMessage, or null */
|
||||
private OutNetMessage _currentOutbound;
|
||||
private SessionKey _sessionKey;
|
||||
@ -96,7 +96,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
|
||||
private long _created;
|
||||
private long _nextMetaTime;
|
||||
/** unencrypted outbound metadata buffer */
|
||||
private byte _meta[] = new byte[16];
|
||||
private final byte _meta[] = new byte[16];
|
||||
private boolean _sendingMeta;
|
||||
/** how many consecutive sends were failed due to (estimated) send queue time */
|
||||
private int _consecutiveBacklog;
|
||||
@ -704,7 +704,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
|
||||
private static int NUM_PREP_BUFS = 5;
|
||||
private static int __liveBufs = 0;
|
||||
private static int __consecutiveExtra;
|
||||
private static List _bufs = new ArrayList(NUM_PREP_BUFS);
|
||||
private final static List _bufs = new ArrayList(NUM_PREP_BUFS);
|
||||
private PrepBuffer acquireBuf() {
|
||||
synchronized (_bufs) {
|
||||
if (_bufs.size() > 0) {
|
||||
@ -1093,11 +1093,17 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
|
||||
// enqueueInfoMessage(); // this often?
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() { return System.identityHashCode(this); }
|
||||
public boolean equals(Object obj) { return obj == this; }
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if(obj == null) return false;
|
||||
if(obj.getClass() != NTCPConnection.class) return false;
|
||||
return obj == this;
|
||||
}
|
||||
|
||||
private static List _i2npHandlers = new ArrayList(4);
|
||||
private static I2NPMessageHandler acquireHandler(RouterContext ctx) {
|
||||
private final static List _i2npHandlers = new ArrayList(4);
|
||||
private final static I2NPMessageHandler acquireHandler(RouterContext ctx) {
|
||||
I2NPMessageHandler rv = null;
|
||||
synchronized (_i2npHandlers) {
|
||||
if (_i2npHandlers.size() > 0)
|
||||
@ -1127,7 +1133,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
|
||||
}
|
||||
|
||||
private static int MAX_DATA_READ_BUFS = 16;
|
||||
private static List _dataReadBufs = new ArrayList(16);
|
||||
private final static List _dataReadBufs = new ArrayList(16);
|
||||
private static DataBuf acquireReadBuf() {
|
||||
synchronized (_dataReadBufs) {
|
||||
if (_dataReadBufs.size() > 0)
|
||||
@ -1289,6 +1295,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "NTCP Connection to " +
|
||||
(_remotePeer == null ? "unknown " : _remotePeer.calculateHash().toBase64().substring(0,6)) +
|
||||
|
@ -17,7 +17,7 @@ import net.i2p.util.Log;
|
||||
class Reader {
|
||||
private RouterContext _context;
|
||||
private Log _log;
|
||||
private List _pendingConnections;
|
||||
private final List _pendingConnections;
|
||||
private List _liveReads;
|
||||
private List _readAfterLive;
|
||||
private List _runners;
|
||||
|
@ -16,7 +16,7 @@ import net.i2p.util.Log;
|
||||
class Writer {
|
||||
private RouterContext _context;
|
||||
private Log _log;
|
||||
private List _pendingConnections;
|
||||
private final List _pendingConnections;
|
||||
private List _liveWrites;
|
||||
private List _writeAfterLive;
|
||||
private List _runners;
|
||||
|
@ -18,7 +18,7 @@ public class ACKSender implements Runnable {
|
||||
private UDPTransport _transport;
|
||||
private PacketBuilder _builder;
|
||||
/** list of peers (PeerState) who we have received data from but not yet ACKed to */
|
||||
private List _peersToACK;
|
||||
private final List _peersToACK;
|
||||
private boolean _alive;
|
||||
|
||||
/** how frequently do we want to send ACKs to a peer? */
|
||||
|
@ -670,7 +670,7 @@ public class EstablishmentManager {
|
||||
}
|
||||
}
|
||||
|
||||
public void receiveRelayResponse(RemoteHostId bob, UDPPacketReader reader) {
|
||||
public void receiveRelayResponse(RemoteHostId bob, UDPPacketReader reader) {// LINT -- Exporting non-public type through public API
|
||||
long nonce = reader.getRelayResponseReader().readNonce();
|
||||
OutboundEstablishState state = null;
|
||||
synchronized (_liveIntroductions) {
|
||||
|
@ -210,7 +210,7 @@ public class InboundEstablishState {
|
||||
public synchronized void setNextSendTime(long when) { _nextSend = when; }
|
||||
|
||||
/** RemoteHostId, uniquely identifies an attempt */
|
||||
public RemoteHostId getRemoteHostId() { return _remoteHostId; }
|
||||
public RemoteHostId getRemoteHostId() { return _remoteHostId; }// LINT -- Exporting non-public type through public API
|
||||
|
||||
public synchronized void receiveSessionConfirmed(UDPPacketReader.SessionConfirmedReader conf) {
|
||||
if (_receivedIdentity == null)
|
||||
@ -331,6 +331,7 @@ public class InboundEstablishState {
|
||||
_nextSend = _lastReceive;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(128);
|
||||
buf.append(super.toString());
|
||||
|
@ -138,6 +138,7 @@ public class InboundMessageState {
|
||||
}
|
||||
public boolean receivedComplete() { return false; }
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(64);
|
||||
buf.append("Partial ACK of ");
|
||||
@ -162,6 +163,7 @@ public class InboundMessageState {
|
||||
}
|
||||
public int getFragmentCount() { return _lastFragment+1; }
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuffer buf = new StringBuffer(32);
|
||||
buf.append("Message: ").append(_messageId);
|
||||
|
@ -25,7 +25,7 @@ public class IntroductionManager {
|
||||
/** map of relay tag to PeerState that should receive the introduction */
|
||||
private Map _outbound;
|
||||
/** list of peers (PeerState) who have given us introduction tags */
|
||||
private List _inbound;
|
||||
private final List _inbound;
|
||||
|
||||
public IntroductionManager(RouterContext ctx, UDPTransport transport) {
|
||||
_context = ctx;
|
||||
@ -157,7 +157,7 @@ public class IntroductionManager {
|
||||
return found;
|
||||
}
|
||||
|
||||
public void receiveRelayIntro(RemoteHostId bob, UDPPacketReader reader) {
|
||||
public void receiveRelayIntro(RemoteHostId bob, UDPPacketReader reader) {// LINT -- Exporting non-public type through public API
|
||||
if (_context.router().isHidden())
|
||||
return;
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
@ -166,7 +166,7 @@ public class IntroductionManager {
|
||||
_transport.send(_builder.buildHolePunch(reader));
|
||||
}
|
||||
|
||||
public void receiveRelayRequest(RemoteHostId alice, UDPPacketReader reader) {
|
||||
public void receiveRelayRequest(RemoteHostId alice, UDPPacketReader reader) {// LINT -- Exporting non-public type through public API
|
||||
if (_context.router().isHidden())
|
||||
return;
|
||||
long tag = reader.getRelayRequestReader().readTag();
|
||||
|
@ -24,7 +24,7 @@ public class MessageReceiver {
|
||||
private Log _log;
|
||||
private UDPTransport _transport;
|
||||
/** list of messages (InboundMessageState) fully received but not interpreted yet */
|
||||
private List _completeMessages;
|
||||
private final List _completeMessages;
|
||||
private boolean _alive;
|
||||
private ByteCache _cache;
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user