- Explore even more aggressively at startup
    - Increase SingleSearchJob timeout
    - Increase time at startup for processing DSRM
    - Cleanups, final, javadoc, concurrent
zzz
2011-07-26 13:30:54 +00:00
parent 164b0e5463
commit 1061e86fad
16 changed files with 90 additions and 55 deletions

View File

@@ -1,3 +1,13 @@
2011-07-26 zzz
* NetDB:
- Explore even more aggressively at startup
- Increase SingleSearchJob timeout
- Increase time at startup for processing DSRM
- Cleanups, final, javadoc, concurrent
* OutNetMessage: Cleanups
* Tunnels: Change another log error to a warn
* Watchdog: Improve the way it calls for a thread dump
2011-07-22 kytv
* Add a 64-bit wrapper to OSX. With 0.8.7 came a fat Mac wrapper with i386 and PPC.
The included wrapper is now quad-fat (i386, ppc, x86_64, and ppc64).

View File

@@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 14;
public final static long BUILD = 15;
/** for example "-test" */
public final static String EXTRA = "";

View File

@@ -25,6 +25,7 @@ import net.i2p.util.Log;
* or other criteria to minimize netdb size, but for now we just use _facade's
* validate(), which is a sliding expiration based on netdb size.
*
* @deprecated unused - see comments in KNDF
*/
class ExpireRoutersJob extends JobImpl {
private final Log _log;

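The "sliding expiration" mentioned in the comment lives in the facade's validate() and is outside this hunk. A minimal sketch of the idea only, assuming the tolerated age of an entry shrinks as the netdb grows; the constants and method shape here are hypothetical, and the string-or-null return convention follows the validate() javadoc later in this diff:

    // Sketch: the larger the netdb, the younger an entry must be to survive.
    // MAX_EXPIRE_AGE, MIN_EXPIRE_AGE, and MIN_ROUTERS are invented names.
    private String validateAge(RouterInfo info, int netDbSize) {
        long maxAge = MAX_EXPIRE_AGE;
        if (netDbSize > MIN_ROUTERS)
            maxAge = Math.max(MIN_EXPIRE_AGE, MAX_EXPIRE_AGE * MIN_ROUTERS / netDbSize);
        long age = _context.clock().now() - info.getPublished();
        return age > maxAge ? "Published too long ago" : null; // null == valid
    }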
View File

@@ -22,6 +22,7 @@ import net.i2p.util.Log;
* yet full, attempting to keep a pool of keys we can explore with (at least one
* per bucket)
*
* @deprecated unused, see comments in KNDF
*/
class ExploreKeySelectorJob extends JobImpl {
private Log _log;

View File

@@ -12,15 +12,16 @@ import net.i2p.router.RouterContext;
import net.i2p.util.Log;
class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
private Log _log;
private FloodOnlySearchJob _search;
private final Log _log;
private final FloodOnlySearchJob _search;
private DatabaseSearchReplyMessage _dsrm;
public FloodOnlyLookupMatchJob(RouterContext ctx, FloodOnlySearchJob job) {
super(ctx);
_log = ctx.logManager().getLog(getClass());
_search = job;
_dsrm = null;
}
public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
@@ -44,7 +45,9 @@ class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
_search.failed();
}
}
public String getName() { return "NetDb flood search (phase 1) match"; }
public void setMessage(I2NPMessage message) {
if (message instanceof DatabaseSearchReplyMessage) {
// a dsrm is only passed in when there are no more lookups remaining

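The comment above is the key to this class: a DSRM only reaches setMessage() once no lookups remain, and the stored _dsrm can then seed follow-up lookups. A hedged sketch of that handoff, using the SingleLookupJob(ctx, dsrm) constructor and the shouldProcessDSRM() flag that both appear later in this commit; whether the real code wires them exactly this way is an assumption:

    // Sketch: remember the DSRM, then turn it into follow-up lookups when
    // the search ends without a match.
    public void setMessage(I2NPMessage message) {
        if (message instanceof DatabaseSearchReplyMessage)
            _dsrm = (DatabaseSearchReplyMessage) message;
    }

    private void processDSRM() {
        if (_dsrm != null && _search.shouldProcessDSRM())
            getContext().jobQueue().addJob(new SingleLookupJob(getContext(), _dsrm));
    }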
View File

@@ -8,20 +8,23 @@ import net.i2p.router.RouterContext;
import net.i2p.util.Log;
class FloodOnlyLookupSelector implements MessageSelector {
private RouterContext _context;
private FloodOnlySearchJob _search;
private final RouterContext _context;
private final FloodOnlySearchJob _search;
private boolean _matchFound;
private Log _log;
private final Log _log;
public FloodOnlyLookupSelector(RouterContext ctx, FloodOnlySearchJob search) {
_context = ctx;
_search = search;
_log = ctx.logManager().getLog(getClass());
_matchFound = false;
}
public boolean continueMatching() {
return _search.getLookupsRemaining() > 0 && !_matchFound && _context.clock().now() < getExpiration();
}
public long getExpiration() { return (_matchFound ? -1 : _search.getExpiration()); }
public boolean isMatch(I2NPMessage message) {
if (message == null) return false;
if (message instanceof DatabaseStoreMessage) {

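For context, a selector like this is registered with the message registry alongside its reply and timeout jobs; the comm system then calls isMatch()/continueMatching() on inbound messages. A sketch of the wiring, with the parameter order taken from the registerPending() call visible in SingleSearchJob further down (ctx, search, and timeoutMs stand in for the surrounding context):

    // Sketch: route a matching DatabaseStoreMessage/DSRM to the match job,
    // or fire the timeout job if nothing matches before timeoutMs elapses.
    MessageSelector sel = new FloodOnlyLookupSelector(ctx, search);
    ReplyJob onReply = new FloodOnlyLookupMatchJob(ctx, search);
    Job onTimeout = new FloodOnlyLookupTimeoutJob(ctx, search);
    OutNetMessage onm = ctx.messageRegistry().registerPending(sel, onReply, onTimeout, timeoutMs);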
View File

@@ -5,17 +5,20 @@ import net.i2p.router.RouterContext;
import net.i2p.util.Log;
class FloodOnlyLookupTimeoutJob extends JobImpl {
private FloodSearchJob _search;
private Log _log;
private final FloodSearchJob _search;
private final Log _log;
public FloodOnlyLookupTimeoutJob(RouterContext ctx, FloodOnlySearchJob job) {
super(ctx);
_search = job;
_log = ctx.logManager().getLog(getClass());
}
public void runJob() {
if (_log.shouldLog(Log.INFO))
_log.info(_search.getJobId() + ": search timed out");
_search.failed();
}
public String getName() { return "NetDb flood search (phase 1) timeout"; }
}

View File

@@ -38,14 +38,14 @@ import net.i2p.util.Log;
*/
class FloodOnlySearchJob extends FloodSearchJob {
private volatile boolean _dead;
private long _created;
private final long _created;
private boolean _shouldProcessDSRM;
private final HashSet<Hash> _unheardFrom;
private final List<OutNetMessage> _out;
protected MessageSelector _replySelector;
protected ReplyJob _onReply;
protected Job _onTimeout;
protected final MessageSelector _replySelector;
protected final ReplyJob _onReply;
protected final Job _onTimeout;
public FloodOnlySearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
super(ctx, facade, key, onFind, onFailed, timeoutMs, isLease);
@@ -61,7 +61,6 @@ class FloodOnlySearchJob extends FloodSearchJob {
_onReply = new FloodOnlyLookupMatchJob(getContext(), this);
_onTimeout = new FloodOnlyLookupTimeoutJob(getContext(), this);
_created = System.currentTimeMillis();
_shouldProcessDSRM = false;
}
public long getCreated() { return _created; }
@@ -90,7 +89,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
// or the global network routing key just changed (which is set at startup,
// so this includes the first few minutes of uptime)
_shouldProcessDSRM = floodfillPeers.size() < MIN_FOR_NO_DSRM ||
getContext().routingKeyGenerator().getLastChanged() > getContext().clock().now() - 30*60*1000;
getContext().routingKeyGenerator().getLastChanged() > getContext().clock().now() - 60*60*1000;
if (floodfillPeers.isEmpty()) {
// ask anybody, they may not return the answer but they will return a few ff peers we can go look up,

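The change in this hunk widens the routing-key window from 30*60*1000 ms (30 minutes) to 60*60*1000 ms (one hour). Since the routing key is reset at startup, DSRMs are now processed for the first hour of uptime, or whenever fewer than MIN_FOR_NO_DSRM floodfills are known. The same condition, pulled apart purely for readability:

    // Equivalent form of the assignment above (illustration only).
    boolean fewFloodfills = floodfillPeers.size() < MIN_FOR_NO_DSRM;
    boolean keyChangedRecently = getContext().routingKeyGenerator().getLastChanged() >
                                 getContext().clock().now() - 60*60*1000;
    _shouldProcessDSRM = fewFloodfills || keyChangedRecently;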
View File

@@ -30,14 +30,14 @@ import net.i2p.util.Log;
*/
public class FloodSearchJob extends JobImpl {
protected Log _log;
protected FloodfillNetworkDatabaseFacade _facade;
protected Hash _key;
protected final FloodfillNetworkDatabaseFacade _facade;
protected final Hash _key;
protected final List<Job> _onFind;
protected final List<Job> _onFailed;
protected long _expiration;
protected int _timeoutMs;
protected long _origExpiration;
protected boolean _isLease;
protected final boolean _isLease;
protected volatile int _lookupsRemaining;
protected volatile boolean _dead;

View File

@@ -44,7 +44,6 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
public FloodfillNetworkDatabaseFacade(RouterContext context) {
super(context);
_activeFloodQueries = new HashMap();
_floodfillEnabled = false;
_verifiesInProgress = new ConcurrentHashSet(8);
_alwaysQuery = _context.getProperty("netDb.alwaysQuery");
@@ -406,8 +405,8 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
}
private class DropLookupFailedJob extends JobImpl {
private Hash _peer;
private RouterInfo _info;
private final Hash _peer;
private final RouterInfo _info;
public DropLookupFailedJob(RouterContext ctx, Hash peer, RouterInfo info) {
super(ctx);
@@ -419,9 +418,10 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
dropAfterLookupFailed(_peer, _info);
}
}
private class DropLookupFoundJob extends JobImpl {
private Hash _peer;
private RouterInfo _info;
private final Hash _peer;
private final RouterInfo _info;
public DropLookupFoundJob(RouterContext ctx, Hash peer, RouterInfo info) {
super(ctx);

View File

@@ -25,7 +25,8 @@ import net.i2p.util.Log;
*
*/
class FloodfillStoreJob extends StoreJob {
private FloodfillNetworkDatabaseFacade _facade;
private final FloodfillNetworkDatabaseFacade _facade;
/**
* Send a data structure to the floodfills
*
@@ -45,6 +46,7 @@ class FloodfillStoreJob extends StoreJob {
@Override
protected int getParallelization() { return 1; }
@Override
protected int getRedundancy() { return 1; }

View File

@@ -25,15 +25,15 @@ import net.i2p.util.Log;
*
*/
public class FloodfillVerifyStoreJob extends JobImpl {
private Log _log;
private Hash _key;
private final Log _log;
private final Hash _key;
private Hash _target;
private Hash _sentTo;
private FloodfillNetworkDatabaseFacade _facade;
private final Hash _sentTo;
private final FloodfillNetworkDatabaseFacade _facade;
private long _expiration;
private long _sendTime;
private long _published;
private boolean _isRouterInfo;
private final boolean _isRouterInfo;
private MessageWrapper.WrappedMessage _wrappedMessage;
private final Set<Hash> _ignore;
@@ -64,6 +64,7 @@ public class FloodfillVerifyStoreJob extends JobImpl {
getContext().statManager().createRateStat("netDb.floodfillVerifyFail", "How long a floodfill verify takes when it fails", "NetworkDatabase", new long[] { 60*60*1000 });
getContext().statManager().createRateStat("netDb.floodfillVerifyTimeout", "How long a floodfill verify takes when it times out", "NetworkDatabase", new long[] { 60*60*1000 });
}
public String getName() { return "Verify netdb store"; }
/**

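The createRateStat() calls above register named rates that aggregate over one-hour periods (60*60*1000 ms). The companion call that feeds such a stat is addRateData(), whose form appears elsewhere in this diff; a sketch with a hypothetical stat name:

    // Hypothetical example: record how long a successful verify took;
    // _sendTime is the field declared above, the stat name is invented.
    long verifyTime = getContext().clock().now() - _sendTime;
    getContext().statManager().addRateData("netDb.floodfillVerifyOK", verifyTime, 0);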
View File

@@ -43,6 +43,7 @@ import net.i2p.router.networkdb.DatabaseLookupMessageHandler;
import net.i2p.router.networkdb.DatabaseStoreMessageHandler;
import net.i2p.router.networkdb.PublishLocalRouterInfoJob;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;
/**
@@ -55,7 +56,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
private DataStore _ds; // hash to DataStructure mapping, persisted when necessary
/** where the data store is pushing the data */
private String _dbDir;
private final Set<Hash> _exploreKeys = new HashSet(64); // set of Hash objects that we should search on (to fill up a bucket, not to get data)
// set of Hash objects that we should search on (to fill up a bucket, not to get data)
private final Set<Hash> _exploreKeys = new ConcurrentHashSet(64);
private boolean _initialized;
/** Clock independent time of when we started up */
private long _started;
@@ -167,26 +169,23 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_exploreJob.updateExploreSchedule();
}
/** @return unmodifiable set */
public Set<Hash> getExploreKeys() {
if (!_initialized) return null;
synchronized (_exploreKeys) {
return new HashSet(_exploreKeys);
}
if (!_initialized)
return Collections.EMPTY_SET;
return Collections.unmodifiableSet(_exploreKeys);
}
public void removeFromExploreKeys(Set toRemove) {
public void removeFromExploreKeys(Set<Hash> toRemove) {
if (!_initialized) return;
synchronized (_exploreKeys) {
_exploreKeys.removeAll(toRemove);
_context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
}
_exploreKeys.removeAll(toRemove);
_context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
}
public void queueForExploration(Set keys) {
public void queueForExploration(Set<Hash> keys) {
if (!_initialized) return;
synchronized (_exploreKeys) {
_exploreKeys.addAll(keys);
_context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
}
_exploreKeys.addAll(keys);
_context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
}
public void shutdown() {
@@ -215,7 +214,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
else
_enforceNetId = DEFAULT_ENFORCE_NETID;
_ds.restart();
synchronized (_exploreKeys) { _exploreKeys.clear(); }
_exploreKeys.clear();
_initialized = true;
@@ -474,9 +473,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
} else {
fail(key);
// this was an interesting key, so either refetch it or simply explore with it
synchronized (_exploreKeys) {
_exploreKeys.add(key);
}
_exploreKeys.add(key);
return null;
}
} else {
@@ -709,6 +706,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* given what we know now.
*
* TODO this is called several times, only check the key and signature once
*
* @return reason why the entry is not valid, or null if it is valid
*/
String validate(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
long now = _context.clock().now();

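This file carries the "concurrent" cleanup from the commit message: swapping the synchronized HashSet for a ConcurrentHashSet deletes every explicit lock around _exploreKeys. The before/after pattern, condensed from the hunks above:

    // Before: external locking on every access, and reads returned a copy.
    synchronized (_exploreKeys) {
        _exploreKeys.add(key);
    }

    // After: the set is internally thread-safe, so call sites shrink to
    // plain method calls and reads can hand out a live unmodifiable view.
    _exploreKeys.add(key);
    Set<Hash> view = Collections.unmodifiableSet(_exploreKeys);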
View File

@@ -20,13 +20,15 @@ import net.i2p.util.Log;
*
*/
class SingleLookupJob extends JobImpl {
private Log _log;
private DatabaseSearchReplyMessage _dsrm;
private final Log _log;
private final DatabaseSearchReplyMessage _dsrm;
public SingleLookupJob(RouterContext ctx, DatabaseSearchReplyMessage dsrm) {
super(ctx);
_log = ctx.logManager().getLog(getClass());
_dsrm = dsrm;
}
public void runJob() {
Hash from = _dsrm.getFromHash();
for (int i = 0; i < _dsrm.getNumReplies(); i++) {
@@ -43,5 +45,6 @@ class SingleLookupJob extends JobImpl {
getContext().jobQueue().addJob(new SingleSearchJob(getContext(), peer, peer));
}
}
public String getName() { return "NetDb process DSRM"; }
}

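The middle of the reply loop falls outside this hunk. A hedged reconstruction of one plausible body, consistent with the getFromHash()/getNumReplies() calls above and the addJob() visible at the end of the loop; getReply(i) and the local-lookup check are assumptions, not the file's confirmed code:

    // Illustration only: skip peers we already know locally, otherwise
    // queue a single lookup for the hash, directed at the DSRM's sender.
    Hash peer = _dsrm.getReply(i);
    if (getContext().netDb().lookupRouterInfoLocally(peer) == null)
        getContext().jobQueue().addJob(new SingleSearchJob(getContext(), peer, from));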
View File

@@ -14,18 +14,24 @@ import net.i2p.util.Log;
*
*/
class SingleSearchJob extends FloodOnlySearchJob {
Hash _to;
OutNetMessage _onm;
private final Hash _to;
private OutNetMessage _onm;
private static final int TIMEOUT = 8*1000;
public SingleSearchJob(RouterContext ctx, Hash key, Hash to) {
// warning, null FloodfillNetworkDatabaseFacade ...
// define our own failed() and success() below so _facade isn't used.
super(ctx, null, key, null, null, 5*1000, false);
super(ctx, null, key, null, null, TIMEOUT, false);
_to = to;
}
@Override
public String getName() { return "NetDb search key from DSRM"; }
@Override
public boolean shouldProcessDSRM() { return false; } // don't loop
@Override
public void runJob() {
_onm = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout, _timeoutMs);
@@ -46,10 +52,12 @@ class SingleSearchJob extends FloodOnlySearchJob {
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), _to);
_lookupsRemaining = 1;
}
@Override
void failed() {
getContext().messageRegistry().unregisterPending(_onm);
}
@Override
void success() {}
}

View File

@@ -53,7 +53,9 @@ class StartExplorersJob extends JobImpl {
getContext().router().gracefulShutdownInProgress())) {
int num = MAX_PER_RUN;
if (_facade.getDataStore().size() < LOW_ROUTERS)
num *= 2;
num *= 3;
if (getContext().router().getUptime() < STARTUP_TIME)
num *= 3;
Set<Hash> toExplore = selectKeysToExplore(num);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Keys to explore during this run: " + toExplore);
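Note how this compounds with the startup clause added above: during the first STARTUP_TIME of uptime on a netdb smaller than LOW_ROUTERS, both branches fire and the job selects up to MAX_PER_RUN * 3 * 3 = 9x the baseline number of exploration keys per run (the values of those constants sit outside this hunk). That is the changelog's "explore even more aggressively at startup".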