* NetDB:
  - Explore even more aggressively at startup
  - Increase SingleSearchJob timeout
  - Increase time at startup for processing DSRM
  - Cleanups, final, javadoc, concurrent
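The "Cleanups, final, javadoc, concurrent" bullet accounts for most of the hunks below: fields that are assigned exactly once in a constructor become final, and redundant default initializations are deleted. A minimal sketch of the pattern on a hypothetical class (not taken from this commit):

    import java.util.logging.Logger;

    // Hypothetical class illustrating the "final" cleanup applied below.
    class LookupTask {
        // Before: "private Logger _log;" -- mutable, so safe publication to
        // other threads was not guaranteed without synchronization.
        // After: a final field is safely published once the constructor returns.
        private final Logger _log;

        LookupTask() {
            _log = Logger.getLogger(LookupTask.class.getName());
            // Redundant inits like "_dsrm = null;" are dropped below:
            // reference fields are already null before first assignment.
        }
    }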
history.txt
@@ -1,3 +1,13 @@
+2011-07-26 zzz
+ * NetDB:
+   - Explore even more aggressively at startup
+   - Increase SingleSearchJob timeout
+   - Increase time at startup for processing DSRM
+   - Cleanups, final, javadoc, concurrent
+ * OutNetMessage: Cleanups
+ * Tunnels: Change another log error to a warn
+ * Watchdog: Improve the way it calls for a thread dump
+
 2011-07-22 kytv
  * Add a 64bit wrapper to OSX. With 0.8.7 came a fat Mac wrapper with i386 and PPC.
    The included wrapper is now quad-fat (i386, ppc, x86_64, and ppc64).
RouterVersion.java
@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 14;
+    public final static long BUILD = 15;
 
     /** for example "-test" */
     public final static String EXTRA = "";
ExpireRoutersJob.java
@@ -25,6 +25,7 @@ import net.i2p.util.Log;
  * or other criteria to minimize netdb size, but for now we just use _facade's
  * validate(), which is a sliding expriation based on netdb size.
  *
+ * @deprecated unused - see comments in KNDF
  */
 class ExpireRoutersJob extends JobImpl {
     private final Log _log;
ExploreKeySelectorJob.java
@@ -22,6 +22,7 @@ import net.i2p.util.Log;
  * yet full, attempting to keep a pool of keys we can explore with (at least one
  * per bucket)
  *
+ * @deprecated unused, see comments in KNDF
  */
 class ExploreKeySelectorJob extends JobImpl {
     private Log _log;
FloodOnlyLookupMatchJob.java
@@ -12,15 +12,16 @@ import net.i2p.router.RouterContext;
 import net.i2p.util.Log;
 
 class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
-    private Log _log;
-    private FloodOnlySearchJob _search;
+    private final Log _log;
+    private final FloodOnlySearchJob _search;
     private DatabaseSearchReplyMessage _dsrm;
 
     public FloodOnlyLookupMatchJob(RouterContext ctx, FloodOnlySearchJob job) {
         super(ctx);
         _log = ctx.logManager().getLog(getClass());
         _search = job;
-        _dsrm = null;
     }
 
     public void runJob() {
         if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
              (getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
@@ -44,7 +45,9 @@ class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
             _search.failed();
         }
     }
 
     public String getName() { return "NetDb flood search (phase 1) match"; }
 
     public void setMessage(I2NPMessage message) {
         if (message instanceof DatabaseSearchReplyMessage) {
             // a dsrm is only passed in when there are no more lookups remaining
FloodOnlyLookupSelector.java
@@ -8,20 +8,23 @@ import net.i2p.router.RouterContext;
 import net.i2p.util.Log;
 
 class FloodOnlyLookupSelector implements MessageSelector {
-    private RouterContext _context;
-    private FloodOnlySearchJob _search;
+    private final RouterContext _context;
+    private final FloodOnlySearchJob _search;
     private boolean _matchFound;
-    private Log _log;
+    private final Log _log;
 
     public FloodOnlyLookupSelector(RouterContext ctx, FloodOnlySearchJob search) {
         _context = ctx;
         _search = search;
         _log = ctx.logManager().getLog(getClass());
-        _matchFound = false;
     }
 
     public boolean continueMatching() {
         return _search.getLookupsRemaining() > 0 && !_matchFound && _context.clock().now() < getExpiration();
     }
 
     public long getExpiration() { return (_matchFound ? -1 : _search.getExpiration()); }
 
     public boolean isMatch(I2NPMessage message) {
         if (message == null) return false;
         if (message instanceof DatabaseStoreMessage) {
FloodOnlyLookupTimeoutJob.java
@@ -5,17 +5,20 @@ import net.i2p.router.RouterContext;
 import net.i2p.util.Log;
 
 class FloodOnlyLookupTimeoutJob extends JobImpl {
-    private FloodSearchJob _search;
-    private Log _log;
+    private final FloodSearchJob _search;
+    private final Log _log;
 
     public FloodOnlyLookupTimeoutJob(RouterContext ctx, FloodOnlySearchJob job) {
         super(ctx);
         _search = job;
         _log = ctx.logManager().getLog(getClass());
     }
 
     public void runJob() {
         if (_log.shouldLog(Log.INFO))
             _log.info(_search.getJobId() + ": search timed out");
         _search.failed();
     }
 
     public String getName() { return "NetDb flood search (phase 1) timeout"; }
 }
FloodOnlySearchJob.java
@@ -38,14 +38,14 @@ import net.i2p.util.Log;
  */
 class FloodOnlySearchJob extends FloodSearchJob {
     private volatile boolean _dead;
-    private long _created;
+    private final long _created;
     private boolean _shouldProcessDSRM;
     private final HashSet<Hash> _unheardFrom;
 
     private final List<OutNetMessage> _out;
-    protected MessageSelector _replySelector;
-    protected ReplyJob _onReply;
-    protected Job _onTimeout;
+    protected final MessageSelector _replySelector;
+    protected final ReplyJob _onReply;
+    protected final Job _onTimeout;
 
     public FloodOnlySearchJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade, Hash key, Job onFind, Job onFailed, int timeoutMs, boolean isLease) {
         super(ctx, facade, key, onFind, onFailed, timeoutMs, isLease);
@@ -61,7 +61,6 @@ class FloodOnlySearchJob extends FloodSearchJob {
         _onReply = new FloodOnlyLookupMatchJob(getContext(), this);
         _onTimeout = new FloodOnlyLookupTimeoutJob(getContext(), this);
         _created = System.currentTimeMillis();
-        _shouldProcessDSRM = false;
     }
 
     public long getCreated() { return _created; }
@@ -90,7 +89,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
         // or the global network routing key just changed (which is set at statrtup,
         // so this includes the first few minutes of uptime)
         _shouldProcessDSRM = floodfillPeers.size() < MIN_FOR_NO_DSRM ||
-                             getContext().routingKeyGenerator().getLastChanged() > getContext().clock().now() - 30*60*1000;
+                             getContext().routingKeyGenerator().getLastChanged() > getContext().clock().now() - 60*60*1000;
 
         if (floodfillPeers.isEmpty()) {
             // ask anybody, they may not return the answer but they will return a few ff peers we can go look up,
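The last hunk above doubles the window, after a change of the global routing key, during which a search still processes DSRM referrals: 30 minutes becomes 60. Because the routing key is recomputed at startup, this also covers roughly the first hour of uptime. A simplified sketch of the decision; MIN_FOR_NO_DSRM exists in the real class, but its value here is a hypothetical stand-in:

    // Simplified sketch of how _shouldProcessDSRM is computed after this commit.
    static boolean shouldProcessDsrm(int knownFloodfills, long lastKeyChange, long now) {
        final int MIN_FOR_NO_DSRM = 30;     // hypothetical stand-in value
        final long WINDOW = 60*60*1000L;    // was 30*60*1000 before this commit
        // Chase DSRM referrals while few floodfills are known, or while the
        // routing key changed recently (which includes early uptime).
        return knownFloodfills < MIN_FOR_NO_DSRM
               || lastKeyChange > now - WINDOW;
    }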
FloodSearchJob.java
@@ -30,14 +30,14 @@ import net.i2p.util.Log;
  */
 public class FloodSearchJob extends JobImpl {
     protected Log _log;
-    protected FloodfillNetworkDatabaseFacade _facade;
-    protected Hash _key;
+    protected final FloodfillNetworkDatabaseFacade _facade;
+    protected final Hash _key;
     protected final List<Job> _onFind;
     protected final List<Job> _onFailed;
     protected long _expiration;
     protected int _timeoutMs;
     protected long _origExpiration;
-    protected boolean _isLease;
+    protected final boolean _isLease;
     protected volatile int _lookupsRemaining;
     protected volatile boolean _dead;
 
FloodfillNetworkDatabaseFacade.java
@@ -44,7 +44,6 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
     public FloodfillNetworkDatabaseFacade(RouterContext context) {
         super(context);
         _activeFloodQueries = new HashMap();
-        _floodfillEnabled = false;
         _verifiesInProgress = new ConcurrentHashSet(8);
         _alwaysQuery = _context.getProperty("netDb.alwaysQuery");
 
@@ -406,8 +405,8 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
     }
 
     private class DropLookupFailedJob extends JobImpl {
-        private Hash _peer;
-        private RouterInfo _info;
+        private final Hash _peer;
+        private final RouterInfo _info;
 
         public DropLookupFailedJob(RouterContext ctx, Hash peer, RouterInfo info) {
             super(ctx);
@@ -419,9 +418,10 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
             dropAfterLookupFailed(_peer, _info);
         }
     }
 
     private class DropLookupFoundJob extends JobImpl {
-        private Hash _peer;
-        private RouterInfo _info;
+        private final Hash _peer;
+        private final RouterInfo _info;
 
         public DropLookupFoundJob(RouterContext ctx, Hash peer, RouterInfo info) {
             super(ctx);
FloodfillStoreJob.java
@@ -25,7 +25,8 @@ import net.i2p.util.Log;
  *
  */
 class FloodfillStoreJob extends StoreJob {
-    private FloodfillNetworkDatabaseFacade _facade;
+    private final FloodfillNetworkDatabaseFacade _facade;
 
     /**
      * Send a data structure to the floodfills
      *
@@ -45,6 +46,7 @@ class FloodfillStoreJob extends StoreJob {
 
     @Override
     protected int getParallelization() { return 1; }
 
     @Override
     protected int getRedundancy() { return 1; }
 
FloodfillVerifyStoreJob.java
@@ -25,15 +25,15 @@ import net.i2p.util.Log;
  *
  */
 public class FloodfillVerifyStoreJob extends JobImpl {
-    private Log _log;
-    private Hash _key;
+    private final Log _log;
+    private final Hash _key;
     private Hash _target;
-    private Hash _sentTo;
-    private FloodfillNetworkDatabaseFacade _facade;
+    private final Hash _sentTo;
+    private final FloodfillNetworkDatabaseFacade _facade;
     private long _expiration;
     private long _sendTime;
     private long _published;
-    private boolean _isRouterInfo;
+    private final boolean _isRouterInfo;
     private MessageWrapper.WrappedMessage _wrappedMessage;
     private final Set<Hash> _ignore;
 
@@ -64,6 +64,7 @@ public class FloodfillVerifyStoreJob extends JobImpl {
         getContext().statManager().createRateStat("netDb.floodfillVerifyFail", "How long a floodfill verify takes when it fails", "NetworkDatabase", new long[] { 60*60*1000 });
         getContext().statManager().createRateStat("netDb.floodfillVerifyTimeout", "How long a floodfill verify takes when it times out", "NetworkDatabase", new long[] { 60*60*1000 });
     }
 
     public String getName() { return "Verify netdb store"; }
 
     /**
KademliaNetworkDatabaseFacade.java
@@ -43,6 +43,7 @@ import net.i2p.router.networkdb.DatabaseLookupMessageHandler;
 import net.i2p.router.networkdb.DatabaseStoreMessageHandler;
 import net.i2p.router.networkdb.PublishLocalRouterInfoJob;
 import net.i2p.router.peermanager.PeerProfile;
+import net.i2p.util.ConcurrentHashSet;
 import net.i2p.util.Log;
 
 /**
@@ -55,7 +56,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     private DataStore _ds; // hash to DataStructure mapping, persisted when necessary
     /** where the data store is pushing the data */
     private String _dbDir;
-    private final Set<Hash> _exploreKeys = new HashSet(64); // set of Hash objects that we should search on (to fill up a bucket, not to get data)
+    // set of Hash objects that we should search on (to fill up a bucket, not to get data)
+    private final Set<Hash> _exploreKeys = new ConcurrentHashSet(64);
     private boolean _initialized;
     /** Clock independent time of when we started up */
     private long _started;
@@ -167,26 +169,23 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         _exploreJob.updateExploreSchedule();
     }
 
+    /** @return unmodifiable set */
     public Set<Hash> getExploreKeys() {
-        if (!_initialized) return null;
-        synchronized (_exploreKeys) {
-            return new HashSet(_exploreKeys);
-        }
+        if (!_initialized)
+            return Collections.EMPTY_SET;
+        return Collections.unmodifiableSet(_exploreKeys);
     }
 
-    public void removeFromExploreKeys(Set toRemove) {
+    public void removeFromExploreKeys(Set<Hash> toRemove) {
         if (!_initialized) return;
-        synchronized (_exploreKeys) {
-            _exploreKeys.removeAll(toRemove);
-            _context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
-        }
+        _exploreKeys.removeAll(toRemove);
+        _context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
     }
-    public void queueForExploration(Set keys) {
+
+    public void queueForExploration(Set<Hash> keys) {
         if (!_initialized) return;
-        synchronized (_exploreKeys) {
-            _exploreKeys.addAll(keys);
-            _context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
-        }
+        _exploreKeys.addAll(keys);
+        _context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
     }
 
     public void shutdown() {
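This hunk is the "concurrent" part of the cleanup: with _exploreKeys now a ConcurrentHashSet, the synchronized blocks disappear and getExploreKeys() can hand out a cheap unmodifiable view instead of a defensive copy. A standalone sketch of the same pattern on the JDK's concurrent set (i2p's ConcurrentHashSet is assumed to behave the same way):

    import java.util.Collections;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    // A concurrent set needs no synchronized blocks, and readers get a live
    // unmodifiable view rather than a fresh copy.
    class ExploreKeySet {
        private final Set<String> _keys = ConcurrentHashMap.newKeySet();

        Set<String> getKeys() { return Collections.unmodifiableSet(_keys); }

        void queue(Set<String> toAdd) { _keys.addAll(toAdd); }

        void remove(Set<String> done) { _keys.removeAll(done); }
    }

Note the returned view is live: callers observe later additions, which is acceptable here because iterating a concurrent set never throws ConcurrentModificationException.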
@@ -215,7 +214,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         else
             _enforceNetId = DEFAULT_ENFORCE_NETID;
         _ds.restart();
-        synchronized (_exploreKeys) { _exploreKeys.clear(); }
+        _exploreKeys.clear();
 
         _initialized = true;
 
@@ -474,9 +473,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         } else {
             fail(key);
             // this was an interesting key, so either refetch it or simply explore with it
-            synchronized (_exploreKeys) {
-                _exploreKeys.add(key);
-            }
+            _exploreKeys.add(key);
             return null;
         }
     } else {
@@ -709,6 +706,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
      * given what we know now.
      *
      * TODO this is called several times, only check the key and signature once
+     *
+     * @return reason why the entry is not valid, or null if it is valid
      */
     String validate(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
         long now = _context.clock().now();
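The new @return tag documents the convention validate() follows: null means the entry is acceptable, anything else is a human-readable rejection reason. A hypothetical call site (not part of this diff) would look like:

    // Hypothetical caller; "key" and "routerInfo" as in validate() above.
    String err = validate(key, routerInfo);
    if (err != null) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Dropping entry for " + key + ": " + err);
        return;    // reject instead of storing
    }
    // err == null: safe to store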
SingleLookupJob.java
@@ -20,13 +20,15 @@ import net.i2p.util.Log;
  *
  */
 class SingleLookupJob extends JobImpl {
-    private Log _log;
-    private DatabaseSearchReplyMessage _dsrm;
+    private final Log _log;
+    private final DatabaseSearchReplyMessage _dsrm;
 
     public SingleLookupJob(RouterContext ctx, DatabaseSearchReplyMessage dsrm) {
         super(ctx);
         _log = ctx.logManager().getLog(getClass());
         _dsrm = dsrm;
     }
 
     public void runJob() {
         Hash from = _dsrm.getFromHash();
         for (int i = 0; i < _dsrm.getNumReplies(); i++) {
@@ -43,5 +45,6 @@ class SingleLookupJob extends JobImpl {
             getContext().jobQueue().addJob(new SingleSearchJob(getContext(), peer, peer));
         }
     }
 
     public String getName() { return "NetDb process DSRM"; }
 }
SingleSearchJob.java
@@ -14,18 +14,24 @@ import net.i2p.util.Log;
  *
  */
 class SingleSearchJob extends FloodOnlySearchJob {
-    Hash _to;
-    OutNetMessage _onm;
+    private final Hash _to;
+    private OutNetMessage _onm;
 
+    private static final int TIMEOUT = 8*1000;
+
     public SingleSearchJob(RouterContext ctx, Hash key, Hash to) {
         // warning, null FloodfillNetworkDatabaseFacade ...
         // define our own failed() and success() below so _facade isn't used.
-        super(ctx, null, key, null, null, 5*1000, false);
+        super(ctx, null, key, null, null, TIMEOUT, false);
         _to = to;
     }
 
     @Override
     public String getName() { return "NetDb search key from DSRM"; }
 
     @Override
     public boolean shouldProcessDSRM() { return false; } // don't loop
 
     @Override
     public void runJob() {
         _onm = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout, _timeoutMs);
@@ -46,10 +52,12 @@ class SingleSearchJob extends FloodOnlySearchJob {
         getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), _to);
         _lookupsRemaining = 1;
     }
 
     @Override
     void failed() {
         getContext().messageRegistry().unregisterPending(_onm);
     }
 
     @Override
     void success() {}
 }
StartExplorersJob.java
@@ -53,7 +53,9 @@ class StartExplorersJob extends JobImpl {
               getContext().router().gracefulShutdownInProgress())) {
             int num = MAX_PER_RUN;
             if (_facade.getDataStore().size() < LOW_ROUTERS)
-                num *= 2;
+                num *= 3;
+            if (getContext().router().getUptime() < STARTUP_TIME)
+                num *= 3;
             Set<Hash> toExplore = selectKeysToExplore(num);
             if (_log.shouldLog(Log.DEBUG))
                 _log.debug("Keys to explore during this run: " + toExplore);
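This final hunk implements "Explore even more aggressively at startup": the per-run exploration budget is now tripled (rather than doubled) when the netDb is small, and tripled again during early uptime. A standalone sketch of the arithmetic; MAX_PER_RUN, LOW_ROUTERS and STARTUP_TIME exist in StartExplorersJob, but the values below are hypothetical stand-ins -- only the multipliers come from the diff:

    // Per-run exploration budget after this commit.
    static int exploreBudget(int knownRouters, long uptimeMs) {
        final int MAX_PER_RUN = 4;              // hypothetical stand-in
        final int LOW_ROUTERS = 250;            // hypothetical stand-in
        final long STARTUP_TIME = 60*60*1000L;  // hypothetical stand-in
        int num = MAX_PER_RUN;
        if (knownRouters < LOW_ROUTERS)
            num *= 3;                           // was "num *= 2"
        if (uptimeMs < STARTUP_TIME)
            num *= 3;                           // new in this commit
        return num;                             // up to 9x the base budget
    }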