propagate from branch 'i2p.i2p' (head 2b1a99ea78270f80514ced3860a7d54cc3f2e309)

to branch 'i2p.i2p.zzz.VTBM' (head 155eea7b96fa5ce48faec385242e0b6eb232b0dd)
This commit is contained in:
zzz
2010-02-01 14:39:16 +00:00
154 changed files with 8218 additions and 7624 deletions

View File

@ -22,8 +22,6 @@ public class DataMessage extends I2NPMessageImpl {
public final static int MESSAGE_TYPE = 20;
private byte _data[];
// private static final int MAX_SIZE = 64*1024; // LINT -- field hides another field, and not used
public DataMessage(I2PAppContext context) {
super(context);
_data = null;
@ -48,7 +46,7 @@ public class DataMessage extends I2NPMessageImpl {
int curIndex = offset;
long size = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
if (size > 64*1024)
if (size > MAX_SIZE)
throw new I2NPMessageException("wtf, size=" + size);
_data = new byte[(int)size];
System.arraycopy(data, curIndex, _data, 0, (int)size);

View File

@ -17,7 +17,7 @@ import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.util.Log;
//import net.i2p.util.Log;
/**
* Defines the message a router sends to another router to search for a
@ -26,18 +26,22 @@ import net.i2p.util.Log;
* @author jrandom
*/
public class DatabaseLookupMessage extends I2NPMessageImpl {
private final static Log _log = new Log(DatabaseLookupMessage.class);
//private final static Log _log = new Log(DatabaseLookupMessage.class);
public final static int MESSAGE_TYPE = 2;
private Hash _key;
private Hash _fromHash;
private TunnelId _replyTunnel;
private Set _dontIncludePeers;
private Set<Hash> _dontIncludePeers;
private static volatile long _currentLookupPeriod = 0;
private static volatile int _currentLookupCount = 0;
//private static volatile long _currentLookupPeriod = 0;
//private static volatile int _currentLookupCount = 0;
// if we try to send over 20 netDb lookups in 10 seconds, we're acting up
private static final long LOOKUP_THROTTLE_PERIOD = 10*1000;
private static final long LOOKUP_THROTTLE_MAX = 50;
//private static final long LOOKUP_THROTTLE_PERIOD = 10*1000;
//private static final long LOOKUP_THROTTLE_MAX = 50;
/** Insanely big. Not much more than 1500 will fit in a message.
Have to prevent a huge alloc on rcv of a malicious msg though */
private static final int MAX_NUM_PEERS = 512;
public DatabaseLookupMessage(I2PAppContext context) {
this(context, false);
@ -48,24 +52,27 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
//setFrom(null);
//setDontIncludePeers(null);
context.statManager().createRateStat("router.throttleNetDbDoSSend", "How many netDb lookup messages we are sending during a period with a DoS detected", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
// This is the wrong place for this, any throttling should be in netdb
// And it doesnt throttle anyway (that would have to be in netdb), it just increments a stat
//context.statManager().createRateStat("router.throttleNetDbDoSSend", "How many netDb lookup messages we are sending during a period with a DoS detected", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
//
// only check DoS generation if we are creating the message...
if (locallyCreated) {
// we do this in the writeMessage so we know that we have all the data
int dosCount = detectDoS(context);
if (dosCount > 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("Are we flooding the network with NetDb messages? (" + dosCount
+ " messages so far)", new Exception("Flood cause"));
}
}
//if (locallyCreated) {
// // we do this in the writeMessage so we know that we have all the data
// int dosCount = detectDoS(context);
// if (dosCount > 0) {
// if (_log.shouldLog(Log.WARN))
// _log.warn("Are we flooding the network with NetDb messages? (" + dosCount
// + " messages so far)", new Exception("Flood cause"));
// }
//}
}
/**
* Return number of netDb messages in this period, if flood, else 0
*
*/
/*****
private static int detectDoS(I2PAppContext context) {
int count = _currentLookupCount++;
// now lets check for DoS
@ -87,6 +94,7 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
return 0;
}
}
*****/
/**
* Defines the key being searched for
@ -113,7 +121,7 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
*
* @return Set of Hash objects, each of which is the H(routerIdentity) to skip
*/
public Set getDontIncludePeers() { return _dontIncludePeers; }
public Set<Hash> getDontIncludePeers() { return _dontIncludePeers; }
public void setDontIncludePeers(Set peers) {
if (peers != null)
_dontIncludePeers = new HashSet(peers);
@ -156,9 +164,9 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
int numPeers = (int)DataHelper.fromLong(data, curIndex, 2);
curIndex += 2;
if ( (numPeers < 0) || (numPeers >= (1<<16) ) )
if ( (numPeers < 0) || (numPeers > MAX_NUM_PEERS) )
throw new I2NPMessageException("Invalid number of peers - " + numPeers);
Set peers = new HashSet(numPeers);
Set<Hash> peers = new HashSet(numPeers);
for (int i = 0; i < numPeers; i++) {
byte peer[] = new byte[Hash.HASH_LENGTH];
System.arraycopy(data, curIndex, peer, 0, Hash.HASH_LENGTH);
@ -201,11 +209,14 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
out[curIndex++] = 0x0;
out[curIndex++] = 0x0;
} else {
byte len[] = DataHelper.toLong(2, _dontIncludePeers.size());
int size = _dontIncludePeers.size();
if (size > MAX_NUM_PEERS)
throw new I2NPMessageException("Too many peers: " + size);
byte len[] = DataHelper.toLong(2, size);
out[curIndex++] = len[0];
out[curIndex++] = len[1];
for (Iterator iter = _dontIncludePeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
for (Iterator<Hash> iter = _dontIncludePeers.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
System.arraycopy(peer.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
}

View File

@ -26,13 +26,14 @@ import net.i2p.data.Hash;
public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
public final static int MESSAGE_TYPE = 3;
private Hash _key;
private List _peerHashes;
private List<Hash> _peerHashes;
private Hash _from;
public DatabaseSearchReplyMessage(I2PAppContext context) {
super(context);
_context.statManager().createRateStat("netDb.searchReplyMessageSend", "How many search reply messages we send", "NetworkDatabase", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
_context.statManager().createRateStat("netDb.searchReplyMessageReceive", "How many search reply messages we receive", "NetworkDatabase", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
// do this in netdb if we need it
//_context.statManager().createRateStat("netDb.searchReplyMessageSend", "How many search reply messages we send", "NetworkDatabase", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
//_context.statManager().createRateStat("netDb.searchReplyMessageReceive", "How many search reply messages we receive", "NetworkDatabase", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
setSearchKey(null);
_peerHashes = new ArrayList(3);
setFromHash(null);
@ -45,7 +46,7 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
public void setSearchKey(Hash key) { _key = key; }
public int getNumReplies() { return _peerHashes.size(); }
public Hash getReply(int index) { return (Hash)_peerHashes.get(index); }
public Hash getReply(int index) { return _peerHashes.get(index); }
public void addReply(Hash peer) { _peerHashes.add(peer); }
//public void addReplies(Collection replies) { _peerHashes.addAll(replies); }
@ -77,7 +78,7 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
curIndex += Hash.HASH_LENGTH;
_from = new Hash(from);
_context.statManager().addRateData("netDb.searchReplyMessageReceive", num*32 + 64, 1);
//_context.statManager().addRateData("netDb.searchReplyMessageReceive", num*32 + 64, 1);
}
/** calculate the message body's length (not including the header and footer */

View File

@ -44,7 +44,7 @@ public class GarlicMessage extends I2NPMessageImpl {
long len = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
if ( (len <= 0) || (len > 64*1024) ) throw new I2NPMessageException("size="+len);
if ( (len <= 0) || (len > MAX_SIZE) ) throw new I2NPMessageException("size="+len);
_data = new byte[(int)len];
System.arraycopy(data, curIndex, _data, 0, (int)len);
}

View File

@ -374,8 +374,9 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
return new DatabaseSearchReplyMessage(context);
case DeliveryStatusMessage.MESSAGE_TYPE:
return new DeliveryStatusMessage(context);
case DateMessage.MESSAGE_TYPE:
return new DateMessage(context);
// unused since forever (0.5?)
//case DateMessage.MESSAGE_TYPE:
// return new DateMessage(context);
case GarlicMessage.MESSAGE_TYPE:
return new GarlicMessage(context);
case TunnelDataMessage.MESSAGE_TYPE:
@ -384,6 +385,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
return new TunnelGatewayMessage(context);
case DataMessage.MESSAGE_TYPE:
return new DataMessage(context);
// unused since 0.6.1.10
case TunnelBuildMessage.MESSAGE_TYPE:
return new TunnelBuildMessage(context);
case TunnelBuildReplyMessage.MESSAGE_TYPE:

View File

@ -35,6 +35,7 @@ class DummyTunnelManagerFacade implements TunnelManagerFacade {
public int getFreeTunnelCount() { return 0; }
public int getOutboundTunnelCount() { return 0; }
public int getInboundClientTunnelCount() { return 0; }
public double getShareRatio() { return 0d; }
public int getOutboundClientTunnelCount() { return 0; }
public long getLastParticipatingExpiration() { return -1; }
public void buildTunnels(Destination client, ClientTunnelSettings settings) {}

View File

@ -155,6 +155,7 @@ public interface ProfileManager {
* through an explicit dbStore or in a dbLookupReply
*/
void heardAbout(Hash peer);
void heardAbout(Hash peer, long when);
/**
* Note that the router received a message from the given peer on the specified

View File

@ -105,6 +105,12 @@ public class Router {
System.setProperty("user.timezone", "GMT");
// just in case, lets make it explicit...
TimeZone.setDefault(TimeZone.getTimeZone("GMT"));
// https://www.kb.cert.org/vuls/id/402580
// http://docs.codehaus.org/display/JETTY/SystemProperties
// Fixed in Jetty 5.1.15 but we are running 5.1.12
// The default is true, unfortunately it was previously
// set to false in wrapper.config thru 0.7.10 so we must set it back here.
System.setProperty("Dorg.mortbay.util.FileResource.checkAliases", "true");
}
public Router() { this(null, null); }
@ -1053,8 +1059,9 @@ public class Router {
String val = _config.getProperty(key);
// Escape line breaks before saving.
// Remember: "\" needs escaping both for regex and string.
val = val.replaceAll("\\r","\\\\r");
val = val.replaceAll("\\n","\\\\n");
// NOOO - see comments in DataHelper
//val = val.replaceAll("\\r","\\\\r");
//val = val.replaceAll("\\n","\\\\n");
buf.append(key).append('=').append(val).append('\n');
}
}

View File

@ -29,8 +29,6 @@ public class RouterClock extends Clock {
*/
@Override
public void setOffset(long offsetMs, boolean force) {
if (false) return;
long delta = offsetMs - _offset;
if (!force) {
if ((offsetMs > MAX_OFFSET) || (offsetMs < 0 - MAX_OFFSET)) {
@ -54,7 +52,8 @@ public class RouterClock extends Clock {
}
// If so configured, check sanity of proposed clock offset
if (Boolean.valueOf(_contextRC.getProperty("router.clockOffsetSanityCheck","true")).booleanValue() == true) {
if (Boolean.valueOf(_contextRC.getProperty("router.clockOffsetSanityCheck","true")).booleanValue() &&
_alreadyChanged) {
// Try calculating peer clock skew
Long peerClockSkew = _contextRC.commSystem().getFramedAveragePeerClockSkew(50);
@ -88,9 +87,10 @@ public class RouterClock extends Clock {
else if (getLog().shouldLog(Log.INFO))
getLog().info("Updating clock offset to " + offsetMs + "ms from " + _offset + "ms");
if (!_statCreated)
if (!_statCreated) {
_contextRC.statManager().createRateStat("clock.skew", "How far is the already adjusted clock being skewed?", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
_statCreated = true;
}
_contextRC.statManager().addRateData("clock.skew", delta, 0);
} else {
getLog().log(Log.INFO, "Initializing clock offset to " + offsetMs + "ms from " + _offset + "ms");

View File

@ -134,8 +134,8 @@ public class RouterContext extends I2PAppContext {
_shitlist = new Shitlist(this);
_blocklist = new Blocklist(this);
_messageValidator = new MessageValidator(this);
//_throttle = new RouterThrottleImpl(this);
_throttle = new RouterDoSThrottle(this);
_throttle = new RouterThrottleImpl(this);
//_throttle = new RouterDoSThrottle(this);
_integrationCalc = new IntegrationCalculator(this);
_speedCalc = new SpeedCalculator(this);
_capacityCalc = new CapacityCalculator(this);

View File

@ -6,6 +6,7 @@ import net.i2p.data.Hash;
* Minor extention of the router throttle to handle some DoS events and
* throttle accordingly.
*
* @deprecated unused
*/
class RouterDoSThrottle extends RouterThrottleImpl {
public RouterDoSThrottle(RouterContext context) {

View File

@ -72,6 +72,7 @@ class RouterThrottleImpl implements RouterThrottle {
}
}
/** @deprecated unused, function moved to netdb */
public boolean acceptNetDbLookupRequest(Hash key) {
long lag = _context.jobQueue().getMaxLag();
if (lag > JOB_LAG_LIMIT) {

View File

@ -18,7 +18,8 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 17;
public final static long BUILD = 6;
/** for example "-test" */
public final static String EXTRA = "";
public final static String FULL_VERSION = VERSION + "-" + BUILD + EXTRA;

View File

@ -51,6 +51,7 @@ public interface TunnelManagerFacade extends Service {
public int getInboundClientTunnelCount();
/** how many outbound client tunnels do we have available? */
public int getOutboundClientTunnelCount();
public double getShareRatio();
/** When does the last tunnel we are participating in expire? */
public long getLastParticipatingExpiration();

View File

@ -977,7 +977,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
long dataMsgId = _cloveId;
getContext().messageHistory().sendPayloadMessage(dataMsgId, true, sendTime);
getContext().clientManager().messageDeliveryStatusUpdate(_from, _clientMessageId, true);
_lease.setNumSuccess(_lease.getNumSuccess()+1);
// unused
//_lease.setNumSuccess(_lease.getNumSuccess()+1);
int size = _clientMessageSize;
@ -1022,7 +1023,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
_log.debug(OutboundClientMessageOneShotJob.this.getJobId()
+ ": Soft timeout through the lease " + _lease);
_lease.setNumFailure(_lease.getNumFailure()+1);
// unused
//_lease.setNumFailure(_lease.getNumFailure()+1);
if (_key != null && _tags != null && _leaseSet != null) {
SessionKeyManager skm = getContext().clientManager().getClientSessionKeyManager(_from.calculateHash());
if (skm != null)

View File

@ -55,13 +55,6 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
public HandleDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
super(ctx);
_log = getContext().logManager().getLog(HandleDatabaseLookupMessageJob.class);
getContext().statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.lookupsMatchedLeaseSet", "How many netDb leaseSet lookups did we have the data for?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.lookupsMatchedReceivedPublished", "How many netDb lookups did we have the data for that were published to us?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.lookupsMatchedLocalClosest", "How many netDb lookups for local data were received where we are the closest peers?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.lookupsMatchedLocalNotClosest", "How many netDb lookups for local data were received where we are NOT the closest peers?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.lookupsMatchedRemoteNotClosest", "How many netDb lookups for remote data were received where we are NOT the closest peers?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_message = receivedMessage;
_from = from;
_fromHash = fromHash;

View File

@ -0,0 +1,34 @@
package net.i2p.router.networkdb.kademlia;
import net.i2p.data.Hash;
import net.i2p.util.ObjectCounter;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
/**
 * Count how often we have recently flooded a key.
 * This offers basic DOS protection but is not a complete solution.
 *
 * @since 0.7.11
 */
class FloodThrottler {
    /** per-key flood counts for the current CLEAN_TIME period */
    private final ObjectCounter<Hash> counter;
    /** max floods of a single key per period before we throttle */
    private static final int MAX_FLOODS = 3;
    /** how often all counts are reset to zero (ms) */
    private static final long CLEAN_TIME = 60*1000;
    FloodThrottler() {
        // parameterized (was raw "new ObjectCounter()") to avoid the unchecked-assignment warning
        this.counter = new ObjectCounter<Hash>();
        // periodically wipe the counts so the limit applies per-period, not forever
        SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
    }
    /** increments before checking */
    boolean shouldThrottle(Hash h) {
        return this.counter.increment(h) > MAX_FLOODS;
    }
    /** resets every key's count when the period expires */
    private class Cleaner implements SimpleTimer.TimedEvent {
        public void timeReached() {
            FloodThrottler.this.counter.clear();
        }
    }
}

View File

@ -23,29 +23,41 @@ import net.i2p.util.Log;
*/
public class FloodfillDatabaseLookupMessageHandler implements HandlerJobBuilder {
private RouterContext _context;
private FloodfillNetworkDatabaseFacade _facade;
private Log _log;
public FloodfillDatabaseLookupMessageHandler(RouterContext context) {
public FloodfillDatabaseLookupMessageHandler(RouterContext context, FloodfillNetworkDatabaseFacade facade) {
_context = context;
_facade = facade;
_log = context.logManager().getLog(FloodfillDatabaseLookupMessageHandler.class);
_context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "NetworkDatabase", new long[] { 60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "NetworkDatabase", new long[] { 60*60*1000l });
// following are for ../HDLMJ
_context.statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "NetworkDatabase", new long[] { 60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "NetworkDatabase", new long[] { 60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsMatchedLeaseSet", "How many netDb leaseSet lookups did we have the data for?", "NetworkDatabase", new long[] { 60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsMatchedReceivedPublished", "How many netDb lookups did we have the data for that were published to us?", "NetworkDatabase", new long[] { 60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsMatchedLocalClosest", "How many netDb lookups for local data were received where we are the closest peers?", "NetworkDatabase", new long[] { 60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsMatchedLocalNotClosest", "How many netDb lookups for local data were received where we are NOT the closest peers?", "NetworkDatabase", new long[] { 60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsMatchedRemoteNotClosest", "How many netDb lookups for remote data were received where we are NOT the closest peers?", "NetworkDatabase", new long[] { 60*60*1000l });
}
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
_context.statManager().addRateData("netDb.lookupsReceived", 1, 0);
if (true || _context.throttle().acceptNetDbLookupRequest(((DatabaseLookupMessage)receivedMessage).getSearchKey())) {
Job j = new HandleFloodfillDatabaseLookupMessageJob(_context, (DatabaseLookupMessage)receivedMessage, from, fromHash);
if (false) {
// might as well inline it, all the heavy lifting is queued up in later jobs, if necessary
j.runJob();
return null;
} else {
DatabaseLookupMessage dlm = (DatabaseLookupMessage)receivedMessage;
if (!_facade.shouldThrottleLookup(dlm.getFrom(), dlm.getReplyTunnel())) {
Job j = new HandleFloodfillDatabaseLookupMessageJob(_context, dlm, from, fromHash);
//if (false) {
// // might as well inline it, all the heavy lifting is queued up in later jobs, if necessary
// j.runJob();
// return null;
//} else {
return j;
}
//}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Dropping lookup request as throttled");
if (_log.shouldLog(Log.WARN))
_log.warn("Dropping lookup request for " + dlm.getSearchKey() + " (throttled), reply was to: " + dlm.getFrom() + " tunnel: " + dlm.getReplyTunnel());
_context.statManager().addRateData("netDb.lookupsDropped", 1, 1);
return null;
}

View File

@ -27,7 +27,13 @@ public class FloodfillDatabaseStoreMessageHandler implements HandlerJobBuilder {
public FloodfillDatabaseStoreMessageHandler(RouterContext context, FloodfillNetworkDatabaseFacade facade) {
_context = context;
_facade = facade;
// following are for HFDSMJ
context.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeLeaseSetHandled", "How many leaseSet store messages have we handled?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeRouterInfoHandled", "How many routerInfo store messages have we handled?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeRecvTime", "How long it takes to handle the local store part of a dbStore?", "NetworkDatabase", new long[] { 60*60*1000l });
}
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
Job j = new HandleFloodfillDatabaseStoreMessageJob(_context, (DatabaseStoreMessage)receivedMessage, from, fromHash, _facade);
if (false) {

View File

@ -26,8 +26,8 @@ class FloodfillMonitorJob extends JobImpl {
private static final int REQUEUE_DELAY = 60*60*1000;
private static final long MIN_UPTIME = 2*60*60*1000;
private static final long MIN_CHANGE_DELAY = 6*60*60*1000;
private static final int MIN_FF = 4;
private static final int MAX_FF = 6;
private static final int MIN_FF = 10;
private static final int MAX_FF = 15;
private static final String PROP_FLOODFILL_PARTICIPANT = "router.floodfillParticipant";
public FloodfillMonitorJob(RouterContext context, FloodfillNetworkDatabaseFacade facade) {

View File

@ -12,6 +12,7 @@ import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
@ -37,6 +38,8 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
/** for testing, see isFloodfill() below */
private static String _alwaysQuery;
private final Set<Hash> _verifiesInProgress;
private FloodThrottler _floodThrottler;
private LookupThrottler _lookupThrottler;
public FloodfillNetworkDatabaseFacade(RouterContext context) {
super(context);
@ -62,11 +65,12 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
public void startup() {
super.startup();
_context.jobQueue().addJob(new FloodfillMonitorJob(_context, this));
_lookupThrottler = new LookupThrottler();
}
@Override
protected void createHandlers() {
_context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context));
_context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context, this));
_context.inNetMessagePool().registerHandlerJobBuilder(DatabaseStoreMessage.MESSAGE_TYPE, new FloodfillDatabaseStoreMessageHandler(_context, this));
}
@ -102,6 +106,22 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
}
}
/**
 * Increments this key's recent-flood count and tests it against the limit.
 * Returns false when _floodThrottler is null — i.e. floodfill mode was
 * never enabled via setFloodfillEnabled(true), which is where the
 * throttler is lazily created — so non-floodfill routers never throttle.
 * @since 0.7.11
 */
boolean shouldThrottleFlood(Hash key) {
return _floodThrottler != null && _floodThrottler.shouldThrottle(key);
}
/**
 * Increments the (from, tunnel) lookup count and tests it against the limit.
 * NOTE(review): _lookupThrottler is only created in startup(); calling this
 * before startup() would NPE — presumably message handlers are registered
 * only after startup completes, but verify that ordering.
 * @since 0.7.11
 */
boolean shouldThrottleLookup(Hash from, TunnelId id) {
return _lookupThrottler.shouldThrottle(from, id);
}
private static final int MAX_TO_FLOOD = 7;
/**
@ -124,6 +144,10 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
RouterInfo target = lookupRouterInfoLocally(peer);
if ( (target == null) || (_context.shitlist().isShitlisted(peer)) )
continue;
// Don't flood a RI back to itself
// Not necessary, a ff will do its own flooding (reply token == 0)
//if (peer.equals(target.getIdentity().getHash()))
// continue;
if (peer.equals(_context.routerHash()))
continue;
DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
@ -176,7 +200,17 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
@Override
protected PeerSelector createPeerSelector() { return new FloodfillPeerSelector(_context); }
public void setFloodfillEnabled(boolean yes) { _floodfillEnabled = yes; }
/**
 * Enable or disable floodfill participation.
 * Synchronized so the FloodThrottler and its rate stats are created exactly
 * once; the throttler is built lazily on the first enable and is never torn
 * down on a later disable (shouldThrottleFlood() still consults it).
 */
synchronized void setFloodfillEnabled(boolean yes) {
_floodfillEnabled = yes;
if (yes && _floodThrottler == null) {
// lazily create the per-key flood throttler and the stats that go with it
_floodThrottler = new FloodThrottler();
_context.statManager().createRateStat("netDb.floodThrottled", "How often do we decline to flood?", "NetworkDatabase", new long[] { 60*60*1000l });
// following are for HFDSMJ
_context.statManager().createRateStat("netDb.storeFloodNew", "How long it takes to flood out a newly received entry?", "NetworkDatabase", new long[] { 60*60*1000l });
_context.statManager().createRateStat("netDb.storeFloodOld", "How often we receive an old entry?", "NetworkDatabase", new long[] { 60*60*1000l });
}
}
public boolean floodfillEnabled() { return _floodfillEnabled; }
public static boolean floodfillEnabled(RouterContext ctx) {
return ((FloodfillNetworkDatabaseFacade)ctx.netDb()).floodfillEnabled();

View File

@ -54,6 +54,7 @@ class FloodfillStoreJob extends StoreJob {
@Override
protected void succeed() {
super.succeed();
if (_state != null) {
if (_facade.isVerifyInProgress(_state.getTarget())) {
if (_log.shouldLog(Log.INFO))
@ -67,6 +68,8 @@ class FloodfillStoreJob extends StoreJob {
boolean isRouterInfo = data instanceof RouterInfo;
if (isRouterInfo) {
published = ((RouterInfo) data).getPublished();
// Temporarily disable
return;
} else if (data instanceof LeaseSet) {
published = ((LeaseSet) data).getEarliestLeaseDate();
}

View File

@ -39,14 +39,6 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
public HandleFloodfillDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash, FloodfillNetworkDatabaseFacade facade) {
super(ctx);
_log = ctx.logManager().getLog(getClass());
ctx.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("netDb.storeLeaseSetHandled", "How many leaseSet store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
//ctx.statManager().createRateStat("netDb.storeLocalLeaseSetAttempt", "Peer tries to store our leaseset (multihome?)", "NetworkDatabase", new long[] { 60*60*1000l });
//ctx.statManager().createRateStat("netDb.storeLocalRouterInfoAttempt", "Peer tries to store our router info", "NetworkDatabase", new long[] { 60*60*1000l });
ctx.statManager().createRateStat("netDb.storeRouterInfoHandled", "How many routerInfo store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("netDb.storeRecvTime", "How long it takes to handle the local store part of a dbStore?", "NetworkDatabase", new long[] { 60*1000l, 10*60*1000l });
ctx.statManager().createRateStat("netDb.storeFloodNew", "How long it takes to flood out a newly received entry?", "NetworkDatabase", new long[] { 60*1000l, 10*60*1000l });
ctx.statManager().createRateStat("netDb.storeFloodOld", "How often we receive an old entry?", "NetworkDatabase", new long[] { 60*1000l, 10*60*1000l });
_message = receivedMessage;
_from = from;
_fromHash = fromHash;
@ -62,9 +54,9 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
String invalidMessage = null;
boolean wasNew = false;
RouterInfo prevNetDb = null;
Hash key = _message.getKey();
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
getContext().statManager().addRateData("netDb.storeLeaseSetHandled", 1, 0);
Hash key = _message.getKey();
if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of leaseset " + _message);
//_log.info("Handling dbStore of leasset " + key + " with expiration of "
@ -100,7 +92,6 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
}
} else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
getContext().statManager().addRateData("netDb.storeRouterInfoHandled", 1, 0);
Hash key = _message.getKey();
if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of router " + key + " with publishDate of "
+ new Date(_message.getRouterInfo().getPublished()));
@ -171,6 +162,14 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()) &&
_message.getReplyToken() > 0) {
if (wasNew) {
// DOS prevention
// Note this does not throttle the ack above
if (_facade.shouldThrottleFlood(key)) {
if (_log.shouldLog(Log.WARN))
_log.warn("Too many recent stores, not flooding key: " + key);
getContext().statManager().addRateData("netDb.floodThrottled", 1, 0);
return;
}
long floodBegin = System.currentTimeMillis();
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
_facade.flood(_message.getLeaseSet());

View File

@ -231,7 +231,7 @@ class KBucketImpl implements KBucket {
/**
* Todo: shuffling here is a hack and doesn't work since
* wwe witched back to a HashSet implementation
* we switched back to a HashSet implementation
*/
public int add(Hash peer) {
_entries.add(peer);

View File

@ -44,7 +44,6 @@ import net.i2p.router.networkdb.DatabaseStoreMessageHandler;
import net.i2p.router.networkdb.PublishLocalRouterInfoJob;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.util.Log;
import net.i2p.util.ObjectCounter;
/**
* Kademlia based version of the network database
@ -140,8 +139,17 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_lastExploreNew = 0;
_activeRequests = new HashMap(8);
_enforceNetId = DEFAULT_ENFORCE_NETID;
context.statManager().createRateStat("netDb.lookupLeaseSetDeferred", "how many lookups are deferred for a single leaseSet lookup?", "NetworkDatabase", new long[] { 60*1000, 5*60*1000 });
context.statManager().createRateStat("netDb.exploreKeySet", "how many keys are queued for exploration?", "NetworkDatabase", new long[] { 10*60*1000 });
context.statManager().createRateStat("netDb.lookupLeaseSetDeferred", "how many lookups are deferred for a single leaseSet lookup?", "NetworkDatabase", new long[] { 60*60*1000 });
context.statManager().createRateStat("netDb.exploreKeySet", "how many keys are queued for exploration?", "NetworkDatabase", new long[] { 60*60*1000 });
// following are for StoreJob
context.statManager().createRateStat("netDb.storeRouterInfoSent", "How many routerInfo store messages have we sent?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeLeaseSetSent", "How many leaseSet store messages have we sent?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storePeers", "How many peers each netDb must be sent to before success?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeFailedPeers", "How many peers each netDb must be sent to before failing completely?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.ackTime", "How long does it take for a peer to ack a netDb store?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.replyTimeout", "How long after a netDb send does the timeout expire (when the peer doesn't reply in time)?", "NetworkDatabase", new long[] { 60*60*1000l });
// following is for RepublishLeaseSetJob
context.statManager().createRateStat("netDb.republishLeaseSetCount", "How often we republish a leaseSet?", "NetworkDatabase", new long[] { 60*60*1000l });
}
@Override
@ -634,6 +642,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
// Iterate through the old failure / success count, copying over the old
// values (if any tunnels overlap between leaseSets). no need to be
// ueberthreadsafe fascists here, since these values are just heuristics
/****** unused
if (rv != null) {
for (int i = 0; i < rv.getLeaseCount(); i++) {
Lease old = rv.getLease(i);
@ -647,6 +656,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
}
}
*******/
return rv;
}
@ -910,6 +920,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return TIMEOUT_MULTIPLIER * (int)responseTime; // give it up to 3x the average response time
}
/** unused (overridden in FNDF) */
public void sendStore(Hash key, DataStructure ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
if ( (ds == null) || (key == null) ) {
if (onFailure != null)

View File

@ -0,0 +1,70 @@
package net.i2p.router.networkdb.kademlia;

import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.util.ObjectCounter;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;

/**
 * Count how often we have recently received a lookup request with
 * the reply specified to go to a peer/TunnelId pair.
 * This offers basic DOS protection but is not a complete solution.
 * The reply peer/tunnel could be spoofed, for example.
 * And a requestor could have up to 6 reply tunnels.
 *
 * @since 0.7.11
 */
class LookupThrottler {
    private final ObjectCounter<ReplyTunnel> counter;
    /** the id of this is -1 */
    private static final TunnelId DUMMY_ID = new TunnelId();
    /** this seems like plenty */
    private static final int MAX_LOOKUPS = 30;
    private static final long CLEAN_TIME = 60*1000;

    LookupThrottler() {
        // parameterized to avoid an unchecked raw-type construction
        this.counter = new ObjectCounter<ReplyTunnel>();
        SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
    }

    /**
     * increments before checking
     * @param key non-null
     * @param id null if for direct lookups
     * @return true if this key/tunnel pair has exceeded MAX_LOOKUPS in the current period
     */
    boolean shouldThrottle(Hash key, TunnelId id) {
        return this.counter.increment(new ReplyTunnel(key, id)) > MAX_LOOKUPS;
    }

    /** periodically resets all counts, so throttling is per CLEAN_TIME window */
    private class Cleaner implements SimpleTimer.TimedEvent {
        public void timeReached() {
            LookupThrottler.this.counter.clear();
        }
    }

    /** yes, we could have a two-level lookup, or just do h.toString() + id.toString() */
    private static class ReplyTunnel {
        public Hash h;
        public TunnelId id;

        ReplyTunnel(Hash h, TunnelId id) {
            this.h = h;
            // direct (non-tunnel) lookups share a single dummy id so they
            // still aggregate into one counter entry per peer
            if (id != null)
                this.id = id;
            else
                this.id = DUMMY_ID;
        }

        @Override
        public boolean equals(Object obj) {
            // type check required by the Object.equals() contract;
            // the original unchecked cast could throw ClassCastException
            if (!(obj instanceof ReplyTunnel))
                return false;
            ReplyTunnel rt = (ReplyTunnel) obj;
            return this.h.equals(rt.h) && this.id.equals(rt.id);
        }

        @Override
        public int hashCode() {
            return this.h.hashCode() + this.id.hashCode();
        }
    }
}

View File

@ -411,6 +411,10 @@ class PersistentDataStore extends TransientDataStore {
try {
// persist = false so we don't write what we just read
_facade.store(ri.getIdentity().getHash(), ri, false);
// when heardAbout() was removed from TransientDataStore, it broke
// profile bootstrapping for new routers,
// so add it here.
getContext().profileManager().heardAbout(ri.getIdentity().getHash(), ri.getPublished());
} catch (IllegalArgumentException iae) {
_log.info("Refused locally loaded routerInfo - deleting");
corrupt = true;

View File

@ -36,7 +36,6 @@ public class RepublishLeaseSetJob extends JobImpl {
_dest = destHash;
_lastPublished = 0;
//getTiming().setStartAfter(ctx.clock().now()+REPUBLISH_LEASESET_DELAY);
getContext().statManager().createRateStat("netDb.republishLeaseSetCount", "How often we republish a leaseSet?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
}
public String getName() { return "Republish a local leaseSet"; }
public void runJob() {

View File

@ -73,12 +73,6 @@ class StoreJob extends JobImpl {
DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
super(context);
_log = context.logManager().getLog(StoreJob.class);
getContext().statManager().createRateStat("netDb.storeRouterInfoSent", "How many routerInfo store messages have we sent?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.storeLeaseSetSent", "How many leaseSet store messages have we sent?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.storePeers", "How many peers each netDb must be sent to before success?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.storeFailedPeers", "How many peers each netDb must be sent to before failing completely?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.ackTime", "How long does it take for a peer to ack a netDb store?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.replyTimeout", "How long after a netDb send does the timeout expire (when the peer doesn't reply in time)?", "NetworkDatabase", new long[] { 60*1000, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_facade = facade;
_state = new StoreState(getContext(), key, data, toSkip);
_onSuccess = onSuccess;
@ -448,6 +442,11 @@ class StoreJob extends JobImpl {
} else {
sent = msg;
_state.addPending(to);
// now that almost all floodfills are at 0.7.10,
// just refuse to store unencrypted to older ones.
_state.replyTimeout(to);
getContext().jobQueue().addJob(new WaitJob(getContext()));
return;
}
SendSuccessJob onReply = new SendSuccessJob(getContext(), peer, outTunnel, sent.getMessageSize());

View File

@ -17,6 +17,7 @@ import net.i2p.router.RouterContext;
import net.i2p.util.EepGet;
import net.i2p.util.I2PAppThread;
import net.i2p.util.Log;
import net.i2p.util.SSLEepGet;
/**
* Moved from ReseedHandler in routerconsole. See ReseedChecker for additional comments.
@ -34,10 +35,12 @@ public class Reseeder {
// Reject unreasonably big files, because we download into a ByteArrayOutputStream.
private static final long MAX_RESEED_RESPONSE_SIZE = 8 * 1024 * 1024;
private static final String DEFAULT_SEED_URL = "http://netdb.i2p2.de/,http://b.netdb.i2p2.de/,http://reseed.i2p-projekt.de/";
private static final String DEFAULT_SEED_URL = "http://a.netdb.i2p2.de/,http://b.netdb.i2p2.de/,http://reseed.i2p-projekt.de/";
private static final String PROP_INPROGRESS = "net.i2p.router.web.ReseedHandler.reseedInProgress";
private static final String PROP_ERROR = "net.i2p.router.web.ReseedHandler.errorMessage";
private static final String PROP_STATUS = "net.i2p.router.web.ReseedHandler.statusMessage";
public static final String PROP_PROXY_HOST = "router.reseedProxyHost";
public static final String PROP_PROXY_PORT = "router.reseedProxyPort";
public Reseeder(RouterContext ctx) {
_context = ctx;
@ -63,6 +66,8 @@ public class Reseeder {
/** Todo: translate the messages sent via PROP_STATUS */
public class ReseedRunner implements Runnable, EepGet.StatusListener {
private boolean _isRunning;
private String _proxyHost;
private int _proxyPort;
public ReseedRunner() {
_isRunning = false;
@ -71,6 +76,8 @@ public class Reseeder {
public boolean isRunning() { return _isRunning; }
public void run() {
_isRunning = true;
_proxyHost = _context.getProperty(PROP_PROXY_HOST);
_proxyPort = _context.getProperty(PROP_PROXY_PORT, -1);
System.out.println("Reseed start");
reseed(false);
System.out.println("Reseed complete");
@ -237,9 +244,15 @@ public class Reseeder {
private byte[] readURL(URL url) throws Exception {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4*1024);
// Do a non-proxied eepget into our ByteArrayOutputStream with 0 retries
EepGet get = new EepGet( I2PAppContext.getGlobalContext(), false, null, -1, 0, 0, MAX_RESEED_RESPONSE_SIZE,
null, baos, url.toString(), false, null, null);
EepGet get;
if (url.toString().startsWith("https")) {
get = new SSLEepGet(I2PAppContext.getGlobalContext(), baos, url.toString());
} else {
// Do a (probably) non-proxied eepget into our ByteArrayOutputStream with 0 retries
boolean shouldProxy = _proxyHost != null && _proxyHost.length() > 0 && _proxyPort > 0;
get = new EepGet(I2PAppContext.getGlobalContext(), shouldProxy, _proxyHost, _proxyPort, 0, 0, MAX_RESEED_RESPONSE_SIZE,
null, baos, url.toString(), false, null, null);
}
get.addStatusListener(ReseedRunner.this);
if (get.fetch()) return baos.toByteArray(); else return null;
}

View File

@ -311,6 +311,17 @@ public class ProfileManagerImpl implements ProfileManager {
if (data == null) return;
data.setLastHeardAbout(_context.clock().now());
}
/**
* Note that the local router received a reference to the given peer
* at a certain time. Only update the time if newer.
*/
public void heardAbout(Hash peer, long when) {
PeerProfile data = getProfile(peer);
if (data == null) return;
if (when > data.getLastHeardAbout())
data.setLastHeardAbout(when);
}
/**
* Note that the router received a message from the given peer on the specified

View File

@ -70,9 +70,9 @@ public class ClientAppConfig {
/*
* Go through the properties, and return a List of ClientAppConfig structures
*/
public static List getClientApps(RouterContext ctx) {
public static List<ClientAppConfig> getClientApps(RouterContext ctx) {
Properties clientApps = getClientAppProps(ctx);
List rv = new ArrayList(5);
List<ClientAppConfig> rv = new ArrayList(8);
int i = 0;
while (true) {
String className = clientApps.getProperty(PREFIX + i + ".main");

View File

@ -56,7 +56,8 @@ public class CreateRouterInfoJob extends JobImpl {
stats.setProperty(RouterInfo.PROP_NETWORK_ID, Router.NETWORK_ID+"");
getContext().router().addCapabilities(info);
info.setOptions(stats);
info.setPeers(new HashSet());
// not necessary, in constructor
//info.setPeers(new HashSet());
info.setPublished(getCurrentPublishDate(getContext()));
RouterIdentity ident = new RouterIdentity();
Certificate cert = getContext().router().createCertificate();

View File

@ -47,6 +47,9 @@ public class WorkingDir {
private final static String PROP_WORKING_DIR = "i2p.dir.config";
private final static String WORKING_DIR_DEFAULT_WINDOWS = "I2P";
private final static String WORKING_DIR_DEFAULT = ".i2p";
private final static String WORKING_DIR_DEFAULT_DAEMON = "i2p-config";
/** we do a couple of things differently if this is the username */
private final static String DAEMON_USER = "i2psvc";
/**
* Only call this once on router invocation.
@ -70,7 +73,10 @@ public class WorkingDir {
home = appdata;
dirf = new File(home, WORKING_DIR_DEFAULT_WINDOWS);
} else {
dirf = new File(home, WORKING_DIR_DEFAULT);
if (DAEMON_USER.equals(System.getProperty("user.name")))
dirf = new File(home, WORKING_DIR_DEFAULT_DAEMON);
else
dirf = new File(home, WORKING_DIR_DEFAULT);
}
}
@ -194,11 +200,15 @@ public class WorkingDir {
out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(newFile), "UTF-8")));
out.println("# Modified by I2P User dir migration script");
String s = null;
boolean isDaemon = DAEMON_USER.equals(System.getProperty("user.name"));
while ((s = DataHelper.readLine(in)) != null) {
if (s.endsWith("=\"eepsite/jetty.xml\"")) {
s = s.replace("=\"eepsite/jetty.xml\"", "=\"" + todir.getAbsolutePath() +
File.separatorChar + "eepsite" +
File.separatorChar + "jetty.xml\"");
} else if (isDaemon && s.equals("clientApp.4.startOnLoad=true")) {
// disable browser launch for daemon
s = "clientApp.4.startOnLoad=false";
}
out.println(s);
}

View File

@ -77,7 +77,8 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
public boolean haveHighOutboundCapacity() { return (_manager == null ? false : _manager.haveHighOutboundCapacity()); }
/**
* Framed average clock skew of connected peers in seconds, or the clock offset if we cannot answer.
* @param percentToInclude 1-100
* @return Framed average clock skew of connected peers in seconds, or the clock offset if we cannot answer.
* Average is calculated over the middle "percentToInclude" peers.
*/
@Override
@ -87,21 +88,23 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
return Long.valueOf(_context.clock().getOffset() / 1000);
}
Vector skews = _manager.getClockSkews();
if (skews == null) {
return Long.valueOf(_context.clock().getOffset() / 1000);
}
if (skews.size() < 5) {
if (skews == null ||
skews.size() <= 0 ||
(skews.size() < 5 && _context.clock().getUpdatedSuccessfully())) {
return Long.valueOf(_context.clock().getOffset() / 1000);
}
// Going to calculate, sort them
Collections.sort(skews);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Clock skews: " + skews);
// Calculate frame size
int frameSize = Math.min((skews.size() * percentToInclude / 100), 2);
int frameSize = Math.max((skews.size() * percentToInclude / 100), 1);
int first = (skews.size() / 2) - (frameSize / 2);
int last = (skews.size() / 2) + (frameSize / 2);
int last = Math.min((skews.size() / 2) + (frameSize / 2), skews.size() - 1);
// Sum skew values
long sum = 0;
for (int i = first; i < last; i++) {
for (int i = first; i <= last; i++) {
long value = ((Long) (skews.get(i))).longValue();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Adding clock skew " + i + " valued " + value + " s.");

View File

@ -365,12 +365,20 @@ public class EstablishState {
// the skew is not authenticated yet, but it is certainly fatal to
// the establishment, so fail hard if appropriate
long diff = 1000*Math.abs(_tsA-_tsB);
if (diff >= Router.CLOCK_FUDGE_FACTOR) {
if (!_context.clock().getUpdatedSuccessfully()) {
// Adjust the clock one time in desperation
_context.clock().setOffset(1000 * (_tsB - _tsA), true);
_tsA = _tsB;
if (diff != 0)
_log.error("NTP failure, NTCP adjusting clock by " + DataHelper.formatDuration(diff));
} else if (diff >= Router.CLOCK_FUDGE_FACTOR) {
_context.statManager().addRateData("ntcp.invalidOutboundSkew", diff, 0);
_transport.markReachable(_con.getRemotePeer().calculateHash(), false);
// Only shitlist if we know what time it is
_context.shitlist().shitlistRouter(DataHelper.formatDuration(diff),
_con.getRemotePeer().calculateHash(),
_x("Excessive clock skew: {0}"));
_transport.setLastBadSkew(_tsA- _tsB);
fail("Clocks too skewed (" + diff + " ms)", null, true);
return;
} else if (_log.shouldLog(Log.DEBUG)) {
@ -570,12 +578,21 @@ public class EstablishState {
_log.debug(prefix() + "verification successful for " + _con);
long diff = 1000*Math.abs(tsA-_tsB);
if (diff >= Router.CLOCK_FUDGE_FACTOR) {
if (!_context.clock().getUpdatedSuccessfully()) {
// Adjust the clock one time in desperation
// This isn't very likely, outbound will do it first
_context.clock().setOffset(1000 * (_tsB - tsA), true);
tsA = _tsB;
if (diff != 0)
_log.error("NTP failure, NTCP adjusting clock by " + DataHelper.formatDuration(diff));
} else if (diff >= Router.CLOCK_FUDGE_FACTOR) {
_context.statManager().addRateData("ntcp.invalidInboundSkew", diff, 0);
_transport.markReachable(alice.calculateHash(), true);
// Only shitlist if we know what time it is
_context.shitlist().shitlistRouter(DataHelper.formatDuration(diff),
alice.calculateHash(),
_x("Excessive clock skew: {0}"));
_transport.setLastBadSkew(tsA- _tsB);
fail("Clocks too skewed (" + diff + " ms)", null, true);
return;
} else if (_log.shouldLog(Log.DEBUG)) {

View File

@ -418,6 +418,8 @@ public class EventPumper implements Runnable {
try { chan.close(); } catch (IOException ioe) { }
return;
}
// BUGFIX for firewalls. --Sponge
chan.socket().setKeepAlive(true);
SelectionKey ckey = chan.register(_selector, SelectionKey.OP_READ);
NTCPConnection con = new NTCPConnection(_context, _transport, chan, ckey);
@ -436,6 +438,8 @@ public class EventPumper implements Runnable {
if (_log.shouldLog(Log.DEBUG))
_log.debug("processing connect for " + key + " / " + con + ": connected? " + connected);
if (connected) {
// BUGFIX for firewalls. --Sponge
chan.socket().setKeepAlive(true);
con.setKey(key);
con.outboundConnected();
_context.statManager().addRateData("ntcp.connectSuccessful", 1, 0);

View File

@ -53,6 +53,7 @@ public class NTCPTransport extends TransportImpl {
private List _sent;
private NTCPSendFinisher _finisher;
private long _lastBadSkew;
private static final long[] RATES = { 10*60*1000 };
public NTCPTransport(RouterContext ctx) {
@ -382,24 +383,36 @@ public class NTCPTransport extends TransportImpl {
return active;
}
/** @param skew in seconds */
void setLastBadSkew(long skew) {
_lastBadSkew = skew;
}
/**
* Return our peer clock skews on this transport.
* Vector composed of Long, each element representing a peer skew in seconds.
*/
@Override
public Vector getClockSkews() {
public Vector<Long> getClockSkews() {
Vector peers = new Vector();
Vector skews = new Vector();
Vector<NTCPConnection> peers = new Vector();
Vector<Long> skews = new Vector();
synchronized (_conLock) {
peers.addAll(_conByIdent.values());
}
for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
NTCPConnection con = (NTCPConnection)iter.next();
skews.addElement(new Long (con.getClockSkew()));
for (Iterator<NTCPConnection> iter = peers.iterator(); iter.hasNext(); ) {
NTCPConnection con = iter.next();
if (con.isEstablished())
skews.addElement(Long.valueOf(con.getClockSkew()));
}
// If we don't have many peers, maybe it is because of a bad clock, so
// return the last bad skew we got
if (skews.size() < 5 && _lastBadSkew != 0)
skews.addElement(Long.valueOf(_lastBadSkew));
if (_log.shouldLog(Log.DEBUG))
_log.debug("NTCP transport returning " + skews.size() + " peer clock skews.");
return skews;

View File

@ -6,6 +6,7 @@ import java.util.List;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.data.DataHelper;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
@ -417,26 +418,36 @@ public class PacketHandler {
long recvOn = packet.getBegin();
long sendOn = reader.readTimestamp() * 1000;
long skew = recvOn - sendOn;
// update skew whether or not we will be dropping the packet for excessive skew
if (state != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received packet from " + state.getRemoteHostId().toString() + " with skew " + skew);
state.adjustClockSkew(skew);
}
_context.statManager().addRateData("udp.receivePacketSkew", skew, packet.getLifetime());
if (!_context.clock().getUpdatedSuccessfully()) {
// adjust the clock one time in desperation
// this doesn't seem to work for big skews, we never get anything back,
// so we have to wait for NTCP to do it
_context.clock().setOffset(0 - skew, true);
if (skew != 0)
_log.error("NTP failure, UDP adjusting clock by " + DataHelper.formatDuration(Math.abs(skew)));
}
if (skew > GRACE_PERIOD) {
if (_log.shouldLog(Log.WARN))
_log.warn("Packet too far in the past: " + new Date(sendOn/1000) + ": " + packet);
_log.warn("Packet too far in the past: " + new Date(sendOn) + ": " + packet);
_context.statManager().addRateData("udp.droppedInvalidSkew", skew, packet.getExpiration());
return;
} else if (skew < 0 - GRACE_PERIOD) {
if (_log.shouldLog(Log.WARN))
_log.warn("Packet too far in the future: " + new Date(sendOn/1000) + ": " + packet);
_log.warn("Packet too far in the future: " + new Date(sendOn) + ": " + packet);
_context.statManager().addRateData("udp.droppedInvalidSkew", 0-skew, packet.getExpiration());
return;
}
if (state != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received packet from " + state.getRemoteHostId().toString() + " with skew " + skew);
state.adjustClockSkew((short)skew);
}
_context.statManager().addRateData("udp.receivePacketSkew", skew, packet.getLifetime());
//InetAddress fromHost = packet.getPacket().getAddress();
//int fromPort = packet.getPacket().getPort();
//RemoteHostId from = new RemoteHostId(fromHost.getAddress(), fromPort);

View File

@ -59,8 +59,8 @@ public class PeerState {
private boolean _rekeyBeganLocally;
/** when were the current cipher and MAC keys established/rekeyed? */
private long _keyEstablishedTime;
/** how far off is the remote peer from our clock, in seconds? */
private short _clockSkew;
/** how far off is the remote peer from our clock, in milliseconds? */
private long _clockSkew;
/** what is the current receive second, for congestion control? */
private long _currentReceiveSecond;
/** when did we last send them a packet? */
@ -327,8 +327,8 @@ public class PeerState {
public boolean getRekeyBeganLocally() { return _rekeyBeganLocally; }
/** when were the current cipher and MAC keys established/rekeyed? */
public long getKeyEstablishedTime() { return _keyEstablishedTime; }
/** how far off is the remote peer from our clock, in seconds? */
public short getClockSkew() { return ( (short) (_clockSkew / 1000)); }
/** how far off is the remote peer from our clock, in milliseconds? */
public long getClockSkew() { return _clockSkew ; }
/** what is the current receive second, for congestion control? */
public long getCurrentReceiveSecond() { return _currentReceiveSecond; }
/** when did we last send them a packet? */
@ -425,9 +425,9 @@ public class PeerState {
public void setRekeyBeganLocally(boolean local) { _rekeyBeganLocally = local; }
/** when were the current cipher and MAC keys established/rekeyed? */
public void setKeyEstablishedTime(long when) { _keyEstablishedTime = when; }
/** how far off is the remote peer from our clock, in seconds? */
public void adjustClockSkew(short skew) {
_clockSkew = (short)(0.9*(float)_clockSkew + 0.1*(float)skew);
/** how far off is the remote peer from our clock, in milliseconds? */
public void adjustClockSkew(long skew) {
_clockSkew = (long) (0.9*(float)_clockSkew + 0.1*(float)skew);
}
/** what is the current receive second, for congestion control? */
public void setCurrentReceiveSecond(long sec) { _currentReceiveSecond = sec; }

View File

@ -74,6 +74,7 @@ public class UDPPacketReader {
return (_message[_payloadBeginOffset] & (1 << 2)) != 0;
}
/** @return seconds */
public long readTimestamp() {
return DataHelper.fromLong(_message, _payloadBeginOffset + 1, 4);
}

View File

@ -42,9 +42,9 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
private Log _log;
private UDPEndpoint _endpoint;
/** Peer (Hash) to PeerState */
private final Map _peersByIdent;
private final Map<Hash, PeerState> _peersByIdent;
/** RemoteHostId to PeerState */
private final Map _peersByRemoteHost;
private final Map<RemoteHostId, PeerState> _peersByRemoteHost;
private PacketHandler _handler;
private EstablishmentManager _establisher;
private MessageQueue _outboundMessages;
@ -575,7 +575,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
*/
PeerState getPeerState(RemoteHostId hostInfo) {
synchronized (_peersByRemoteHost) {
return (PeerState)_peersByRemoteHost.get(hostInfo);
return _peersByRemoteHost.get(hostInfo);
}
}
@ -585,7 +585,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
*/
public PeerState getPeerState(Hash remotePeer) {
synchronized (_peersByIdent) {
return (PeerState)_peersByIdent.get(remotePeer);
return _peersByIdent.get(remotePeer);
}
}
@ -664,7 +664,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
PeerState oldPeer = null;
if (remotePeer != null) {
synchronized (_peersByIdent) {
oldPeer = (PeerState)_peersByIdent.put(remotePeer, peer);
oldPeer = _peersByIdent.put(remotePeer, peer);
if ( (oldPeer != null) && (oldPeer != peer) ) {
// transfer over the old state/inbound message fragments/etc
peer.loadFrom(oldPeer);
@ -684,7 +684,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
if (remoteId == null) return false;
synchronized (_peersByRemoteHost) {
oldPeer = (PeerState)_peersByRemoteHost.put(remoteId, peer);
oldPeer = _peersByRemoteHost.put(remoteId, peer);
if ( (oldPeer != null) && (oldPeer != peer) ) {
// transfer over the old state/inbound message fragments/etc
peer.loadFrom(oldPeer);
@ -883,14 +883,14 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
long now = _context.clock().now();
_context.statManager().addRateData("udp.droppedPeer", now - peer.getLastReceiveTime(), now - peer.getKeyEstablishedTime());
synchronized (_peersByIdent) {
altByIdent = (PeerState)_peersByIdent.remove(peer.getRemotePeer());
altByIdent = _peersByIdent.remove(peer.getRemotePeer());
}
}
RemoteHostId remoteId = peer.getRemoteHostId();
if (remoteId != null) {
synchronized (_peersByRemoteHost) {
altByHost = (PeerState)_peersByRemoteHost.remove(remoteId);
altByHost = _peersByRemoteHost.remove(remoteId);
}
}
@ -1417,8 +1417,8 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
int active = 0;
int inactive = 0;
synchronized (_peersByIdent) {
for (Iterator iter = _peersByIdent.values().iterator(); iter.hasNext(); ) {
PeerState peer = (PeerState)iter.next();
for (Iterator<PeerState> iter = _peersByIdent.values().iterator(); iter.hasNext(); ) {
PeerState peer = iter.next();
if (now-peer.getLastReceiveTime() > 5*60*1000)
inactive++;
else
@ -1434,8 +1434,8 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
int active = 0;
int inactive = 0;
synchronized (_peersByIdent) {
for (Iterator iter = _peersByIdent.values().iterator(); iter.hasNext(); ) {
PeerState peer = (PeerState)iter.next();
for (Iterator<PeerState> iter = _peersByIdent.values().iterator(); iter.hasNext(); ) {
PeerState peer = iter.next();
if (now-peer.getLastSendFullyTime() > 1*60*1000)
inactive++;
else
@ -1461,20 +1461,24 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
* Vector composed of Long, each element representing a peer skew in seconds.
*/
@Override
public Vector getClockSkews() {
public Vector<Long> getClockSkews() {
Vector skews = new Vector();
Vector peers = new Vector();
Vector<Long> skews = new Vector();
Vector<PeerState> peers = new Vector();
synchronized (_peersByIdent) {
peers.addAll(_peersByIdent.values());
}
// If our clock is way off, we may not have many (or any) successful connections,
// so try hard in that case to return good data
boolean includeEverybody = _context.router().getUptime() < 10*60*1000 || peers.size() < 10;
long now = _context.clock().now();
for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
PeerState peer = (PeerState)iter.next();
if (now-peer.getLastReceiveTime() > 60*60*1000) continue; // skip old peers
skews.addElement(new Long (peer.getClockSkew()));
for (Iterator<PeerState> iter = peers.iterator(); iter.hasNext(); ) {
PeerState peer = iter.next();
if ((!includeEverybody) && now - peer.getLastReceiveTime() > 15*60*1000)
continue; // skip old peers
skews.addElement(Long.valueOf(peer.getClockSkew() / 1000));
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("UDP transport returning " + skews.size() + " peer clock skews.");
@ -1813,7 +1817,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
public void renderStatusHTML(Writer out, int sortFlags) throws IOException {}
@Override
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException {
TreeSet peers = new TreeSet(getComparator(sortFlags));
TreeSet<PeerState> peers = new TreeSet(getComparator(sortFlags));
synchronized (_peersByIdent) {
peers.addAll(_peersByIdent.values());
}
@ -1958,7 +1962,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
buf.append("</td>");
buf.append(" <td class=\"cells\" align=\"center\" >");
buf.append(peer.getClockSkew());
buf.append(peer.getClockSkew() / 1000);
buf.append("s</td>");
offsetTotal = offsetTotal + peer.getClockSkew();
@ -2055,7 +2059,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
buf.append(formatKBps(bpsIn)).append("/").append(formatKBps(bpsOut));
buf.append("K/s</b></td>");
buf.append(" <td align=\"center\"><b>").append(numPeers > 0 ? DataHelper.formatDuration(uptimeMsTotal/numPeers) : "0s");
buf.append("</b></td> <td align=\"center\"><b>").append(numPeers > 0 ? DataHelper.formatDuration(offsetTotal*1000/numPeers) : "0ms").append("</b></td>\n");
buf.append("</b></td> <td align=\"center\"><b>").append(numPeers > 0 ? DataHelper.formatDuration(offsetTotal/numPeers) : "0ms").append("</b></td>\n");
buf.append(" <td align=\"center\"><b>");
buf.append(numPeers > 0 ? cwinTotal/(numPeers*1024) + "K" : "0K");
buf.append("</b></td> <td>&nbsp;</td>\n");
@ -2276,13 +2280,13 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
}
PeerState pickTestPeer(RemoteHostId dontInclude) {
List peers = null;
List<PeerState> peers = null;
synchronized (_peersByIdent) {
peers = new ArrayList(_peersByIdent.values());
}
Collections.shuffle(peers, _context.random());
for (int i = 0; i < peers.size(); i++) {
PeerState peer = (PeerState)peers.get(i);
PeerState peer = peers.get(i);
if ( (dontInclude != null) && (dontInclude.equals(peer.getRemoteHostId())) )
continue;
RouterInfo peerInfo = _context.netDb().lookupRouterInfoLocally(peer.getRemotePeer());

View File

@ -260,6 +260,7 @@ public class TunnelPool {
/** list of tunnelInfo instances of tunnels currently being built */
public List listPending() { synchronized (_inProgress) { return new ArrayList(_inProgress); } }
/** duplicate of size(), let's pick one */
int getTunnelCount() { synchronized (_tunnels) { return _tunnels.size(); } }
public TunnelPoolSettings getSettings() { return _settings; }
@ -273,6 +274,7 @@ public class TunnelPool {
}
public TunnelPeerSelector getSelector() { return _peerSelector; }
public boolean isAlive() { return _alive; }
/** duplicate of getTunnelCount(), let's pick one */
public int size() {
synchronized (_tunnels) {
return _tunnels.size();

View File

@ -193,6 +193,29 @@ public class TunnelPoolManager implements TunnelManagerFacade {
public int getParticipatingCount() { return _context.tunnelDispatcher().getParticipatingCount(); }
public long getLastParticipatingExpiration() { return _context.tunnelDispatcher().getLastParticipatingExpiration(); }
/**
* @return (number of part. tunnels) / (estimated total number of hops in our expl.+client tunnels)
* 100 max.
* We just use length setting, not variance, for speed
* @since 0.7.10
*/
public double getShareRatio() {
int part = getParticipatingCount();
if (part <= 0)
return 0d;
List<TunnelPool> pools = new ArrayList();
listPools(pools);
int count = 0;
for (int i = 0; i < pools.size(); i++) {
TunnelPool pool = pools.get(i);
count += pool.size() * pool.getSettings().getLength();
}
if (count <= 0)
return 100d;
return Math.min(part / (double) count, 100d);
}
public boolean isValidTunnel(Hash client, TunnelInfo tunnel) {
if (tunnel.getExpiration() < _context.clock().now())
return false;

View File

@ -92,8 +92,9 @@ public class RouterGenerator {
RouterInfo info = new RouterInfo();
try {
info.setAddresses(createAddresses(num));
info.setOptions(new Properties());
info.setPeers(new HashSet());
// not necessary, in constructor
//info.setOptions(new Properties());
//info.setPeers(new HashSet());
info.setPublished(Clock.getInstance().now());
RouterIdentity ident = new RouterIdentity();
BigInteger bv = new BigInteger(""+num);