* Netdb:
  - Use new receivedAsReply flag in LeaseSet to mark those received as response to a query
  - Mark which methods in FloodfillPeerSelector may return our own hash
  - Redefine selectNearest() so it may return our own hash, so it can be used for closeness measurement
  - Redefine findNearestRouters() to return Hashes instead of RouterInfos
  - Fix LeaseSet response decisions for floodfills, based on partial keyspace and closeness measurements
  - List only count of published leasesets in netdb

@@ -119,9 +119,10 @@ public class LeaseSet extends DataStructureImpl {
     /**
      * If true, we received this LeaseSet by searching for it
      * Default false.
+     * @since 0.7.14
      */
     public boolean getReceivedAsReply() { return _receivedAsReply; }
-    /** set to true */
+    /** set to true @since 0.7.14 */
     public void setReceivedAsReply() { _receivedAsReply = true; }

     public void addLease(Lease lease) {
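Note: together with the existing receivedAsPublished flag, this gives each
LeaseSet a provenance marker. A minimal sketch (not part of the commit; the
helper is hypothetical, using only the accessors shown above):

    // Illustration only - classifies a LeaseSet by how it reached us.
    static String classify(LeaseSet ls) {
        if (ls.getReceivedAsPublished())
            return "stored to us unsolicited - safe to answer queries for it";
        if (ls.getReceivedAsReply())
            return "fetched by our own lookup - answering would leak what we searched for";
        return "local, or received down a client tunnel";
    }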

history.txt
@@ -1,3 +1,17 @@
+2010-06-05 zzz
+ * Netdb:
+   - Use new receivedAsReply flag in LeaseSet to mark
+     those received as response to a query
+   - Mark which methods in FloodfillPeerSelector may return
+     our own hash
+   - Redefine selectNearest() so it may return our own hash,
+     so it can be used for closeness measurement
+   - Redefine findNearestRouters() to return Hashes
+     instead of RouterInfos
+   - Fix LeaseSet response decisions for floodfills, based
+     on partial keyspace and closeness measurements
+   - List only count of published leasesets in netdb
+
 2010-06-03 zzz
  * NewsFetcher: Delay a minimum amount at startup
  * Update: Fix multiple updates after manually

@@ -59,5 +59,5 @@ class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade {
     }

     public Set<Hash> getAllRouters() { return new HashSet(_routers.keySet()); }
-    public Set findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore) { return new HashSet(_routers.values()); }
+    public Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore) { return new HashSet(_routers.values()); }
 }

@@ -30,7 +30,7 @@ public abstract class NetworkDatabaseFacade implements Service {
      * @param maxNumRouters The maximum number of routers to return
      * @param peersToIgnore Hash of routers not to include
      */
-    public abstract Set findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore);
+    public abstract Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore);

     public abstract void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs);
     public abstract LeaseSet lookupLeaseSetLocally(Hash key);

@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 17;
+    public final static long BUILD = 18;

     /** for example "-test" */
     public final static String EXTRA = "-rc";

@@ -41,14 +41,13 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
     private RouterIdentity _from;
     private Hash _fromHash;
     private final static int MAX_ROUTERS_RETURNED = 3;
-    private final static int CLOSENESS_THRESHOLD = 10; // StoreJob.REDUNDANCY * 2
+    private final static int CLOSENESS_THRESHOLD = 8; // FNDF.MAX_TO_FLOOD + 1
     private final static int REPLY_TIMEOUT = 60*1000;
     private final static int MESSAGE_PRIORITY = 300;

     /**
-     * If a routerInfo structure isn't updated within an hour, drop it
-     * and search for a later version. This value should be large enough
-     * to deal with the Router.CLOCK_FUDGE_FACTOR.
+     * If a routerInfo structure isn't this recent, don't send it out.
+     * Equal to KNDF.ROUTER_INFO_EXPIRATION_FLOODFILL.
      */
     public final static long EXPIRE_DELAY = 60*60*1000;

@@ -85,29 +84,66 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {

         LeaseSet ls = getContext().netDb().lookupLeaseSetLocally(_message.getSearchKey());
         if (ls != null) {
-            boolean publish = getContext().clientManager().shouldPublishLeaseSet(_message.getSearchKey());
+            // We have to be very careful here to decide whether or not to send out the leaseSet,
+            // to avoid anonymity vulnerabilities.
+            // As this is complex, lots of comments follow...

-            // only answer a request for a LeaseSet if it has been published
+            boolean isLocal = getContext().clientManager().isLocal(ls.getDestination());
+            boolean shouldPublishLocal = isLocal && getContext().clientManager().shouldPublishLeaseSet(_message.getSearchKey());
+
+            // Only answer a request for a LeaseSet if it has been published
             // to us, or, if its local, if we would have published to ourselves
-            if (publish && (answerAllQueries() || ls.getReceivedAsPublished())) {
+
+            // answerAllQueries: We are floodfill
+            // getReceivedAsPublished:
+            //    false for local
+            //    false for received over a tunnel
+            //    false for received in response to our lookups
+            //    true for received in a DatabaseStoreMessage unsolicited
+            if (ls.getReceivedAsPublished()) {
+                // Answer anything that was stored to us directly
+                // (i.e. "received as published" - not the result of a query, or received
+                // over a client tunnel).
+                // This is probably because we are floodfill, but also perhaps we used to be floodfill,
+                // so we don't check the answerAllQueries() flag.
+                // Local leasesets are not handled here
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("We have the published LS " + _message.getSearchKey().toBase64() + ", answering query");
                 getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1, 0);
                 sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
-            } else {
-                Set<RouterInfo> routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(),
-                                                                                        CLOSENESS_THRESHOLD,
-                                                                                        _message.getDontIncludePeers());
-                if (getContext().clientManager().isLocal(ls.getDestination())) {
-                    if (publish && weAreClosest(routerInfoSet)) {
-                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1, 0);
-                        sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
-                    } else {
-                        getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1, 0);
-                        sendClosest(_message.getSearchKey(), routerInfoSet, fromKey, _message.getReplyTunnel());
-                    }
+            } else if (shouldPublishLocal && answerAllQueries()) {
+                // We are floodfill, and this is our local leaseset, and we publish it.
+                // Only send it out if it is in our estimated keyspace.
+                // For this, we do NOT use their dontInclude list as it can't be trusted
+                // (i.e. it could mess up the closeness calculation)
+                Set<Hash> closestHashes = getContext().netDb().findNearestRouters(_message.getSearchKey(),
+                                                                                  CLOSENESS_THRESHOLD, null);
+                if (weAreClosest(closestHashes)) {
+                    // It's in our keyspace, so give it to them
+                    if (_log.shouldLog(Log.INFO))
+                        _log.info("We have local LS " + _message.getSearchKey().toBase64() + ", answering query, in our keyspace");
+                    getContext().statManager().addRateData("netDb.lookupsMatchedLocalClosest", 1, 0);
+                    sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
                 } else {
-                    getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1, 0);
-                    sendClosest(_message.getSearchKey(), routerInfoSet, fromKey, _message.getReplyTunnel());
+                    // Lie, pretend we don't have it
+                    if (_log.shouldLog(Log.INFO))
+                        _log.info("We have local LS " + _message.getSearchKey().toBase64() + ", NOT answering query, out of our keyspace");
+                    getContext().statManager().addRateData("netDb.lookupsMatchedLocalNotClosest", 1, 0);
+                    Set<Hash> routerHashSet = getNearestRouters();
+                    sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel());
                 }
+            } else {
+                // It was not published to us (we looked it up, for example)
+                // or it's local and we aren't floodfill,
+                // or it's local and we don't publish it.
+                // Lie, pretend we don't have it
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("We have LS " + _message.getSearchKey().toBase64() +
+                              ", NOT answering query - local? " + isLocal + " shouldPublish? " + shouldPublishLocal +
+                              " RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
+                getContext().statManager().addRateData("netDb.lookupsMatchedRemoteNotClosest", 1, 0);
+                Set<Hash> routerHashSet = getNearestRouters();
+                sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel());
             }
         } else {
             RouterInfo info = getContext().netDb().lookupRouterInfoLocally(_message.getSearchKey());
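Note: the rewritten branches reduce to a three-way policy. A condensed
restatement (illustration only, not code from the commit; the boolean
parameters stand in for the clientManager and keyspace checks above):

    // Hypothetical summary of the new reply policy.
    static boolean mayAnswerLeaseSetQuery(LeaseSet ls, boolean weAreFloodfill,
                                          boolean inOurKeyspace,
                                          boolean isLocalDest, boolean wouldPublish) {
        if (ls.getReceivedAsPublished())
            return true;                 // stored to us unsolicited: answer
        if (isLocalDest && wouldPublish && weAreFloodfill)
            return inOurKeyspace;        // our own published LS: only if we would
                                         // legitimately be a floodfill for it
        return false;                    // otherwise lie, pretend we don't have it
    }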
@@ -134,14 +170,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
                     sendData(_message.getSearchKey(), info, fromKey, _message.getReplyTunnel());
                 }
             } else {
-                // not found locally - return closest peer routerInfo structs
-                Set<Hash> dontInclude = _message.getDontIncludePeers();
-                // Honor flag to exclude all floodfills
-                //if (dontInclude.contains(Hash.FAKE_HASH)) {
-                // This is handled in FloodfillPeerSelector
-                Set<RouterInfo> routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(),
-                                                                                        MAX_ROUTERS_RETURNED,
-                                                                                        dontInclude);
+                // not found locally - return closest peer hashes
+                Set<Hash> routerHashSet = getNearestRouters();

                 // ERR: see above
                 // // Remove hidden nodes from set..
@@ -154,13 +184,32 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {

                 if (_log.shouldLog(Log.DEBUG))
                     _log.debug("We do not have key " + _message.getSearchKey().toBase64() +
-                               " locally. sending back " + routerInfoSet.size() + " peers to " + fromKey.toBase64());
-                sendClosest(_message.getSearchKey(), routerInfoSet, fromKey, _message.getReplyTunnel());
+                               " locally. sending back " + routerHashSet.size() + " peers to " + fromKey.toBase64());
+                sendClosest(_message.getSearchKey(), routerHashSet, fromKey, _message.getReplyTunnel());
             }
         }
     }

-    private boolean isUnreachable(RouterInfo info) {
+    /**
+     * Closest to the message's search key,
+     * honoring the message's dontInclude set.
+     * Will not include us.
+     * Side effect - adds us to the message's dontInclude set.
+     */
+    private Set<Hash> getNearestRouters() {
+        Set<Hash> dontInclude = _message.getDontIncludePeers();
+        if (dontInclude == null)
+            dontInclude = new HashSet(1);
+        dontInclude.add(getContext().routerHash());
+        // Honor flag to exclude all floodfills
+        //if (dontInclude.contains(Hash.FAKE_HASH)) {
+        // This is handled in FloodfillPeerSelector
+        return getContext().netDb().findNearestRouters(_message.getSearchKey(),
+                                                       MAX_ROUTERS_RETURNED,
+                                                       dontInclude);
+    }
+
+    private static boolean isUnreachable(RouterInfo info) {
         if (info == null) return true;
         String cap = info.getCapabilities();
         if (cap == null) return false;
@@ -171,21 +220,11 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
     public static final boolean DEFAULT_PUBLISH_UNREACHABLE = true;

     private boolean publishUnreachable() {
-        String publish = getContext().getProperty(PROP_PUBLISH_UNREACHABLE);
-        if (publish != null)
-            return Boolean.valueOf(publish).booleanValue();
-        else
-            return DEFAULT_PUBLISH_UNREACHABLE;
+        return getContext().getProperty(PROP_PUBLISH_UNREACHABLE, DEFAULT_PUBLISH_UNREACHABLE);
     }

-    private boolean weAreClosest(Set routerInfoSet) {
-        for (Iterator iter = routerInfoSet.iterator(); iter.hasNext(); ) {
-            RouterInfo cur = (RouterInfo)iter.next();
-            if (cur.getIdentity().calculateHash().equals(getContext().routerHash())) {
-                return true;
-            }
-        }
-        return false;
+    private boolean weAreClosest(Set<Hash> routerHashSet) {
+        return routerHashSet.contains(getContext().routerHash());
     }

     private void sendData(Hash key, DataStructure data, Hash toPeer, TunnelId replyTunnel) {
@@ -207,17 +246,17 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
         sendMessage(msg, toPeer, replyTunnel);
     }

-    protected void sendClosest(Hash key, Set<RouterInfo> routerInfoSet, Hash toPeer, TunnelId replyTunnel) {
+    protected void sendClosest(Hash key, Set<Hash> routerHashes, Hash toPeer, TunnelId replyTunnel) {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Sending closest routers to key " + key.toBase64() + ": # peers = "
-                       + routerInfoSet.size() + " tunnel " + replyTunnel);
+                       + routerHashes.size() + " tunnel " + replyTunnel);
         DatabaseSearchReplyMessage msg = new DatabaseSearchReplyMessage(getContext());
         msg.setFromHash(getContext().routerHash());
         msg.setSearchKey(key);
-        for (Iterator iter = routerInfoSet.iterator(); iter.hasNext(); ) {
-            RouterInfo peer = (RouterInfo)iter.next();
-            msg.addReply(peer.getIdentity().getHash());
-            if (msg.getNumReplies() >= MAX_ROUTERS_RETURNED)
+        int i = 0;
+        for (Hash h : routerHashes) {
+            msg.addReply(h);
+            if (++i >= MAX_ROUTERS_RETURNED)
                 break;
         }
         getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0);

@@ -54,10 +54,22 @@ class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
         }
         try {
             DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
-            if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
+            if (_log.shouldLog(Log.INFO))
+                _log.info(_search.getJobId() + ": got a DSM for "
+                          + dsm.getKey().toBase64());
+            // This store will be duplicated by HFDSMJ
+            // We do it here first to make sure it is in the DB before
+            // runJob() and search.success() is called???
+            // Should we just pass the DataStructure directly back to somebody?
+            if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
+                // Since HFDSMJ wants to setReceivedAsPublished(), we have to
+                // set a flag saying this was really the result of a query,
+                // so don't do that.
+                dsm.getLeaseSet().setReceivedAsReply();
                 getContext().netDb().store(dsm.getKey(), dsm.getLeaseSet());
-            else
+            } else {
                 getContext().netDb().store(dsm.getKey(), dsm.getRouterInfo());
+            }
         } catch (IllegalArgumentException iae) {
             if (_log.shouldLog(Log.WARN))
                 _log.warn(_search.getJobId() + ": Received an invalid store reply", iae);
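Note: the ordering here matters. The same store is processed again by
HandleFloodfillDatabaseStoreMessageJob, which only sets receivedAsPublished
when the reply flag is absent, so the flag must be set before the store.
The sequence, excerpted from the two jobs in this commit (illustration only):

    // 1. FloodOnlyLookupMatchJob, on a lookup reply:
    dsm.getLeaseSet().setReceivedAsReply();                      // mark provenance first
    getContext().netDb().store(dsm.getKey(), dsm.getLeaseSet()); // then store
    // 2. HandleFloodfillDatabaseStoreMessageJob, on the duplicate store:
    if (!ls.getReceivedAsReply())
        ls.setReceivedAsPublished(true);                         // skipped for query replies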

@@ -38,11 +38,13 @@ class FloodfillPeerSelector extends PeerSelector {
      * Pick out peers with the floodfill capacity set, returning them first, but then
      * after they're complete, sort via kademlia.
      * Puts the floodfill peers that are directly connected first in the list.
+     * List will not include our own hash.
      *
+     * @param peersToIgnore can be null
      * @return List of Hash for the peers selected
      */
     @Override
-    public List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+    List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
         return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, true);
     }

@@ -50,22 +52,32 @@ class FloodfillPeerSelector extends PeerSelector {
      * Pick out peers with the floodfill capacity set, returning them first, but then
      * after they're complete, sort via kademlia.
      * Does not prefer the floodfill peers that are directly connected.
+     * List will not include our own hash.
      *
+     * @param peersToIgnore can be null
      * @return List of Hash for the peers selected
      */
     @Override
-    public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
+    List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
         return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, false);
     }

-    public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets, boolean preferConnected) {
+    /**
+     * Pick out peers with the floodfill capacity set, returning them first, but then
+     * after they're complete, sort via kademlia.
+     * List will not include our own hash.
+     *
+     * @param peersToIgnore can be null
+     * @return List of Hash for the peers selected
+     */
+    List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets, boolean preferConnected) {
         if (peersToIgnore == null)
             peersToIgnore = new HashSet(1);
         peersToIgnore.add(_context.routerHash());
         FloodfillSelectionCollector matches = new FloodfillSelectionCollector(key, peersToIgnore, maxNumRouters);
         if (kbuckets == null) return new ArrayList();
         kbuckets.getAll(matches);
-        List rv = matches.get(maxNumRouters, preferConnected);
+        List<Hash> rv = matches.get(maxNumRouters, preferConnected);
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Searching for " + maxNumRouters + " peers close to " + key + ": "
                        + rv + " (not including " + peersToIgnore + ") [allHashes.size = "
@@ -74,15 +86,24 @@ class FloodfillPeerSelector extends PeerSelector {
     }

     /**
-     * @return all floodfills not shitlisted forever. list will not include our own hash
+     * @return all floodfills not shitlisted forever.
+     * List will not include our own hash.
      * List is not sorted and not shuffled.
      */
-    public List<Hash> selectFloodfillParticipants(KBucketSet kbuckets) {
-        return selectFloodfillParticipants(null, kbuckets);
+    List<Hash> selectFloodfillParticipants(KBucketSet kbuckets) {
+        Set<Hash> ignore = new HashSet(1);
+        ignore.add(_context.routerHash());
+        return selectFloodfillParticipants(ignore, kbuckets);
     }

-    public List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet kbuckets) {
-        if (kbuckets == null) return new ArrayList();
+    /**
+     * @param toIgnore can be null
+     * @return all floodfills not shitlisted forever.
+     * List MAY INCLUDE our own hash.
+     * List is not sorted and not shuffled.
+     */
+    private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet kbuckets) {
+        if (kbuckets == null) return Collections.EMPTY_LIST;
         FloodfillSelectionCollector matches = new FloodfillSelectionCollector(null, toIgnore, 0);
         kbuckets.getAll(matches);
         return matches.getFloodfillParticipants();
@@ -92,8 +113,9 @@ class FloodfillPeerSelector extends PeerSelector {
      * Sort the floodfills. The challenge here is to keep the good ones
      * at the front and the bad ones at the back. If they are all good or bad,
      * searches and stores won't work well.
+     * List will not include our own hash.
      *
-     * @return all floodfills not shitlisted foreverx
+     * @return floodfills closest to the key that are not shitlisted forever
      * @param key the routing key
      * @param maxNumRouters max to return
      * Sorted by closest to the key if > maxNumRouters, otherwise not
@@ -104,8 +126,10 @@ class FloodfillPeerSelector extends PeerSelector {
      * success newer than failure
      * Group 3: All others
      */
-    public List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet kbuckets) {
-        return selectFloodfillParticipants(key, maxNumRouters, null, kbuckets);
+    List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet kbuckets) {
+        Set<Hash> ignore = new HashSet(1);
+        ignore.add(_context.routerHash());
+        return selectFloodfillParticipants(key, maxNumRouters, ignore, kbuckets);
     }

     /** .5 * PublishLocalRouterInfoJob.PUBLISH_DELAY */
@@ -116,7 +140,29 @@ class FloodfillPeerSelector extends PeerSelector {
     private static final int NO_FAIL_LOOKUP_GOOD = NO_FAIL_LOOKUP_OK * 3;
     private static final int MAX_GOOD_RESP_TIME = 5*1000;

-    public List<Hash> selectFloodfillParticipants(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) {
+    /**
+     * See above for description
+     * List will not include our own hash
+     * @param toIgnore can be null
+     */
+    List<Hash> selectFloodfillParticipants(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) {
+        if (toIgnore == null) {
+            toIgnore = new HashSet(1);
+            toIgnore.add(_context.routerHash());
+        } else if (!toIgnore.contains(_context.routerHash())) {
+            // copy the Set so we don't confuse StoreJob
+            toIgnore = new HashSet(toIgnore);
+            toIgnore.add(_context.routerHash());
+        }
+        return selectFloodfillParticipantsIncludingUs(key, howMany, toIgnore, kbuckets);
+    }
+
+    /**
+     * See above for description
+     * List MAY CONTAIN our own hash unless included in toIgnore
+     * @param toIgnore can be null
+     */
+    private List<Hash> selectFloodfillParticipantsIncludingUs(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) {
         List<Hash> ffs = selectFloodfillParticipants(toIgnore, kbuckets);
         TreeSet<Hash> sorted = new TreeSet(new XORComparator(key));
         sorted.addAll(ffs);
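Note: the split between selectFloodfillParticipants() (never returns us) and
selectFloodfillParticipantsIncludingUs() exists so a caller can measure
closeness rather than pick peers, per the commit message. A sketch of the
membership test this enables (illustration only; selector, kbuckets and
ourHash stand in for the caller's fields):

    // Are we among the 'threshold' floodfills closest to the key?
    // Only meaningful if our own hash is allowed into the candidate list.
    static boolean inOurKeyspace(FloodfillPeerSelector selector, KBucketSet kbuckets,
                                 Hash key, Hash ourHash, int threshold) {
        List<Hash> closest = selector.selectNearest(key, threshold, null, kbuckets);
        return closest.contains(ourHash);
    }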
@ -204,6 +250,11 @@ class FloodfillPeerSelector extends PeerSelector {
|
|||||||
private Set<Hash> _toIgnore;
|
private Set<Hash> _toIgnore;
|
||||||
private int _matches;
|
private int _matches;
|
||||||
private int _wanted;
|
private int _wanted;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Warning - may return our router hash - add to toIgnore if necessary
|
||||||
|
* @param toIgnore can be null
|
||||||
|
*/
|
||||||
public FloodfillSelectionCollector(Hash key, Set<Hash> toIgnore, int wanted) {
|
public FloodfillSelectionCollector(Hash key, Set<Hash> toIgnore, int wanted) {
|
||||||
_key = key;
|
_key = key;
|
||||||
_sorted = new TreeSet(new XORComparator(key));
|
_sorted = new TreeSet(new XORComparator(key));
|
||||||
@ -225,8 +276,8 @@ class FloodfillPeerSelector extends PeerSelector {
|
|||||||
// return;
|
// return;
|
||||||
if ( (_toIgnore != null) && (_toIgnore.contains(entry)) )
|
if ( (_toIgnore != null) && (_toIgnore.contains(entry)) )
|
||||||
return;
|
return;
|
||||||
if (entry.equals(_context.routerHash()))
|
//if (entry.equals(_context.routerHash()))
|
||||||
return;
|
// return;
|
||||||
// it isn't direct, so who cares if they're shitlisted
|
// it isn't direct, so who cares if they're shitlisted
|
||||||
//if (_context.shitlist().isShitlisted(entry))
|
//if (_context.shitlist().isShitlisted(entry))
|
||||||
// return;
|
// return;
|
||||||
@ -328,12 +379,14 @@ class FloodfillPeerSelector extends PeerSelector {
|
|||||||
* Floodfill peers only. Used only by HandleDatabaseLookupMessageJob to populate the DSRM.
|
* Floodfill peers only. Used only by HandleDatabaseLookupMessageJob to populate the DSRM.
|
||||||
* UNLESS peersToIgnore contains Hash.FAKE_HASH (all zeros), in which case this is an exploratory
|
* UNLESS peersToIgnore contains Hash.FAKE_HASH (all zeros), in which case this is an exploratory
|
||||||
* lookup, and the response should not include floodfills.
|
* lookup, and the response should not include floodfills.
|
||||||
|
* List MAY INCLUDE our own router - add to peersToIgnore if you don't want
|
||||||
*
|
*
|
||||||
* @param key the original key (NOT the routing key)
|
* @param key the original key (NOT the routing key)
|
||||||
|
* @param peersToIgnore can be null
|
||||||
* @return List of Hash for the peers selected, ordered
|
* @return List of Hash for the peers selected, ordered
|
||||||
*/
|
*/
|
||||||
@Override
|
@Override
|
||||||
public List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
|
List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
|
||||||
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
|
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
|
||||||
if (peersToIgnore != null && peersToIgnore.contains(Hash.FAKE_HASH)) {
|
if (peersToIgnore != null && peersToIgnore.contains(Hash.FAKE_HASH)) {
|
||||||
// return non-ff
|
// return non-ff
|
||||||
@ -343,7 +396,7 @@ class FloodfillPeerSelector extends PeerSelector {
|
|||||||
return matches.get(maxNumRouters);
|
return matches.get(maxNumRouters);
|
||||||
} else {
|
} else {
|
||||||
// return ff
|
// return ff
|
||||||
return selectFloodfillParticipants(rkey, maxNumRouters, peersToIgnore, kbuckets);
|
return selectFloodfillParticipantsIncludingUs(rkey, maxNumRouters, peersToIgnore, kbuckets);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|

@@ -30,6 +30,12 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLookupMessageJob {
         super(ctx, receivedMessage, from, fromHash);
     }

+    /**
+     * @return are we floodfill
+     * We don't really answer all queries if this is true,
+     * since floodfills don't have the whole keyspace any more,
+     * see ../HDLMJ for discussion
+     */
     @Override
     protected boolean answerAllQueries() {
         if (!FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext())) return false;
@@ -42,7 +48,7 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLookupMessageJob {
      * will stop bugging us.
      */
     @Override
-    protected void sendClosest(Hash key, Set routerInfoSet, Hash toPeer, TunnelId replyTunnel) {
+    protected void sendClosest(Hash key, Set<Hash> routerInfoSet, Hash toPeer, TunnelId replyTunnel) {
         super.sendClosest(key, routerInfoSet, toPeer, replyTunnel);

         // go away, you got the wrong guy, send our RI back unsolicited

@@ -76,16 +76,43 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
                           key.toBase64().substring(0, 4));
             }
             LeaseSet ls = _message.getLeaseSet();
-            // mark it as something we received, so we'll answer queries
-            // for it. this flag does NOT get set on entries that we
+            //boolean oldrar = ls.getReceivedAsReply();
+            //boolean oldrap = ls.getReceivedAsPublished();
+            // If this was received as a response to a query,
+            // FloodOnlyLookupMatchJob called setReceivedAsReply(),
+            // and we are seeing this only as a duplicate,
+            // so we don't set the receivedAsPublished() flag.
+            // Otherwise, mark it as something we received unsolicited, so we'll answer queries
+            // for it. This flag must NOT get set on entries that we
             // receive in response to our own lookups.
-            ls.setReceivedAsPublished(true);
+            // See ../HDLMJ for more info
+            if (!ls.getReceivedAsReply())
+                ls.setReceivedAsPublished(true);
+            //boolean rap = ls.getReceivedAsPublished();
+            //if (_log.shouldLog(Log.INFO))
+            //    _log.info("oldrap? " + oldrap + " oldrar? " + oldrar + " newrap? " + rap);
             LeaseSet match = getContext().netDb().store(key, _message.getLeaseSet());
-            if ( (match == null) || (match.getEarliestLeaseDate() < _message.getLeaseSet().getEarliestLeaseDate()) ) {
+            if (match == null) {
                 wasNew = true;
+            } else if (match.getEarliestLeaseDate() < _message.getLeaseSet().getEarliestLeaseDate()) {
+                wasNew = true;
+                // If it is in our keyspace and we are talking to it
+
+
+                if (match.getReceivedAsPublished())
+                    ls.setReceivedAsPublished(true);
             } else {
                 wasNew = false;
-                match.setReceivedAsPublished(true);
+                // The FloodOnlyLookupSelector goes away after the first good reply
+                // So on the second reply, FloodOnlyMatchJob is not called to set ReceivedAsReply.
+                // So then we think it's an unsolicited store.
+                // So we should skip this.
+                // If the 2nd reply is newer than the first, ReceivedAsPublished will be set incorrectly,
+                // that will hopefully be rare.
+                // A more elaborate solution would be a List of recent ReceivedAsReply LeaseSets, with receive time ?
+                // A real unsolicited store is likely to be new - hopefully...
+                //if (!ls.getReceivedAsReply())
+                //    match.setReceivedAsPublished(true);
             }
         } catch (IllegalArgumentException iae) {
             invalidMessage = iae.getMessage();
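Note: splitting the old combined condition separates "no previous entry"
from "newer than the previous entry"; only the latter case carries the RAP
flag forward from the replaced entry. The freshness test, restated against
the store() contract documented later in this commit (@return previous
entry or null); illustration only:

    // Was the received LeaseSet effectively new?
    static boolean isNewer(LeaseSet previous, LeaseSet received) {
        return previous == null ||
               previous.getEarliestLeaseDate() < received.getEarliestLeaseDate();
    }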

@@ -314,12 +314,18 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {

     /**
      * Get the routers closest to that key in response to a remote lookup
+     * Only used by ../HDLMJ
+     * Set MAY INCLUDE our own router - add to peersToIgnore if you don't want
+     *
+     * @param key the real key, NOT the routing key
+     * @param peersToIgnore can be null
      */
-    public Set<RouterInfo> findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore) {
-        if (!_initialized) return null;
-        return getRouters(_peerSelector.selectNearest(key, maxNumRouters, peersToIgnore, _kb));
+    public Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore) {
+        if (!_initialized) return Collections.EMPTY_SET;
+        return new HashSet(_peerSelector.selectNearest(key, maxNumRouters, peersToIgnore, _kb));
     }

+    /*****
     private Set<RouterInfo> getRouters(Collection hashes) {
         if (!_initialized) return null;
         Set rv = new HashSet(hashes.size());
@@ -337,17 +343,16 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         }
         return rv;
     }
+    *****/

     /** get the hashes for all known routers */
     public Set<Hash> getAllRouters() {
-        if (!_initialized) return new HashSet(0);
-        Set keys = _ds.getKeys();
-        Set rv = new HashSet(keys.size());
+        if (!_initialized) return Collections.EMPTY_SET;
+        Set<Hash> keys = _ds.getKeys();
+        Set<Hash> rv = new HashSet(keys.size());
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("getAllRouters(): # keys in the datastore: " + keys.size());
-        for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
-            Hash key = (Hash)iter.next();
-
+        for (Hash key : keys) {
             DataStructure ds = _ds.get(key);
             if (ds == null) {
                 if (_log.shouldLog(Log.INFO))
@@ -382,10 +387,27 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
             }
         }

+    /**
+     * This is only used by StatisticsManager to publish
+     * the count if we are floodfill.
+     * So to hide a clue that a popular eepsite is hosted
+     * on a floodfill router, only count leasesets that
+     * are "received as published", as of 0.7.14
+     */
     @Override
     public int getKnownLeaseSets() {
         if (_ds == null) return 0;
-        return _ds.countLeaseSets();
+        //return _ds.countLeaseSets();
+        Set<Hash> keys = _ds.getKeys();
+        int rv = 0;
+        for (Hash key : keys) {
+            DataStructure ds = _ds.get(key);
+            if (ds != null &&
+                ds instanceof LeaseSet &&
+                ((LeaseSet)ds).getReceivedAsPublished())
+                rv++;
+        }
+        return rv;
     }

     /* aparently, not used?? should be public if used elsewhere. */
|
|||||||
* Store the leaseSet
|
* Store the leaseSet
|
||||||
*
|
*
|
||||||
* @throws IllegalArgumentException if the leaseSet is not valid
|
* @throws IllegalArgumentException if the leaseSet is not valid
|
||||||
|
* @return previous entry or null
|
||||||
*/
|
*/
|
||||||
public LeaseSet store(Hash key, LeaseSet leaseSet) throws IllegalArgumentException {
|
public LeaseSet store(Hash key, LeaseSet leaseSet) throws IllegalArgumentException {
|
||||||
if (!_initialized) return null;
|
if (!_initialized) return null;
|
||||||
@ -742,6 +765,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
|
|||||||
* store the routerInfo
|
* store the routerInfo
|
||||||
*
|
*
|
||||||
* @throws IllegalArgumentException if the routerInfo is not valid
|
* @throws IllegalArgumentException if the routerInfo is not valid
|
||||||
|
* @return previous entry or null
|
||||||
*/
|
*/
|
||||||
public RouterInfo store(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
|
public RouterInfo store(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
|
||||||
return store(key, routerInfo, true);
|
return store(key, routerInfo, true);
|
||||||
|

@@ -27,7 +27,10 @@ import net.i2p.stat.Rate;
 import net.i2p.stat.RateStat;
 import net.i2p.util.Log;

-public class PeerSelector {
+/**
+ * Mostly unused, see overrides in FloodfillPeerSelector
+ */
+class PeerSelector {
     protected Log _log;
     protected RouterContext _context;

@@ -37,13 +40,14 @@ public class PeerSelector {
     }

     /**
+     * UNUSED - See FloodfillPeerSelector override
      * Search through the kbucket set to find the most reliable peers close to the
      * given key, skipping all of the ones already checked
+     * List will not include our own hash.
      *
      * @return ordered list of Hash objects
      */
-    /* FIXME Exporting non-public type through public API FIXME */
-    public List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API
+    List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet kbuckets) {
         // get the peers closest to the key
         return selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets);
     }
@@ -52,11 +56,11 @@ public class PeerSelector {
      * Ignore KBucket ordering and do the XOR explicitly per key. Runs in O(n*log(n))
      * time (n=routing table size with c ~ 32 xor ops). This gets strict ordering
      * on closest
+     * List will not include our own hash.
      *
      * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
      */
-    /* FIXME Exporting non-public type through public API FIXME */
-    public List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API
+    List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
         //if (true)
             return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets);

@@ -88,14 +92,15 @@ public class PeerSelector {
     }

     /**
+     * UNUSED - See FloodfillPeerSelector override
      * Ignore KBucket ordering and do the XOR explicitly per key. Runs in O(n*log(n))
      * time (n=routing table size with c ~ 32 xor ops). This gets strict ordering
      * on closest
+     * List will not include our own hash.
      *
      * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
      */
-    /* FIXME Exporting non-public type through public API FIXME */
-    public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API
+    List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
         if (peersToIgnore == null)
             peersToIgnore = new HashSet(1);
         peersToIgnore.add(_context.routerHash());
@@ -109,6 +114,7 @@ public class PeerSelector {
         return rv;
     }

+    /** UNUSED */
     private class MatchSelectionCollector implements SelectionCollector {
         private TreeMap<BigInteger, Hash> _sorted;
         private Hash _key;
@@ -132,7 +138,7 @@ public class PeerSelector {
             if (info.getIdentity().isHidden())
                 return;

-            BigInteger diff = getDistance(_key, entry);
+            BigInteger diff = HashDistance.getDistance(_key, entry);
             _sorted.put(diff, entry);
             _matches++;
         }
@@ -189,21 +195,18 @@ public class PeerSelector {
     }
     **********/

-    public static BigInteger getDistance(Hash targetKey, Hash routerInQuestion) {
-        // plain XOR of the key and router
-        byte diff[] = DataHelper.xor(routerInQuestion.getData(), targetKey.getData());
-        return new BigInteger(1, diff);
-    }
-
     /**
+     * UNUSED - See FloodfillPeerSelector override
      * Generic KBucket filtering to find the hashes close to a key, regardless of other considerations.
      * This goes through the kbuckets, starting with the key's location, moving towards us, and then away from the
      * key's location's bucket, selecting peers until we have numClosest.
+     * List MAY INCLUDE our own router - add to peersToIgnore if you don't want
      *
+     * @param key the original key (NOT the routing key)
+     * @param peersToIgnore can be null
      * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
      */
-    /* FIXME Exporting non-public type through public API FIXME */
-    public List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API
+    List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
         // sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia
         // would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm.
         // later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance()
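Note: getDistance() is removed from PeerSelector; the call site above now
uses HashDistance.getDistance(). The removed body documents the metric: a
plain XOR of the two hashes, read as an unsigned integer. A self-contained
equivalent (illustration only; I2P's DataHelper.xor does the byte work in
the original):

    // XOR distance between two equal-length hashes, as in the removed code.
    static java.math.BigInteger xorDistance(byte[] a, byte[] b) {
        byte[] diff = new byte[a.length];
        for (int i = 0; i < a.length; i++)
            diff[i] = (byte) (a[i] ^ b[i]);       // plain XOR, byte by byte
        return new java.math.BigInteger(1, diff); // signum 1 = non-negative magnitude
    }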

@@ -1,6 +1,7 @@
 package net.i2p.router.tunnel;

 import net.i2p.data.Hash;
+import net.i2p.data.LeaseSet;
 import net.i2p.data.Payload;
 import net.i2p.data.TunnelId;
 import net.i2p.data.i2np.DataMessage;
@@ -16,6 +17,7 @@ import net.i2p.router.ClientMessage;
 import net.i2p.router.RouterContext;
 import net.i2p.router.TunnelInfo;
 import net.i2p.router.message.GarlicMessageReceiver;
+//import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 import net.i2p.util.Log;

 /**
@@ -35,8 +37,8 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
         _client = client;
         _log = ctx.logManager().getLog(InboundMessageDistributor.class);
         _receiver = new GarlicMessageReceiver(ctx, this, client);
-        _context.statManager().createRateStat("tunnel.dropDangerousClientTunnelMessage", "How many tunnel messages come down a client tunnel that we shouldn't expect (lifetime is the 'I2NP type')", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
-        _context.statManager().createRateStat("tunnel.handleLoadClove", "When do we receive load test cloves", "Tunnels", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
+        _context.statManager().createRateStat("tunnel.dropDangerousClientTunnelMessage", "How many tunnel messages come down a client tunnel that we shouldn't expect (lifetime is the 'I2NP type')", "Tunnels", new long[] { 60*60*1000 });
+        _context.statManager().createRateStat("tunnel.handleLoadClove", "When do we receive load test cloves", "Tunnels", new long[] { 60*60*1000 });
     }

     public void distribute(I2NPMessage msg, Hash target) {
@@ -164,11 +166,19 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
                 DatabaseStoreMessage dsm = (DatabaseStoreMessage)data;
                 try {
                     if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
-                        // dont tell anyone else about it if we got it through a client tunnel
-                        // (though this is the default, but it doesn't hurt to make it explicit)
-                        if (_client != null)
-                            dsm.getLeaseSet().setReceivedAsPublished(false);
-                        _context.netDb().store(dsm.getKey(), dsm.getLeaseSet());
+                        // If it was stored to us before, don't undo the
+                        // receivedAsPublished flag so we will continue to respond to requests
+                        // for the leaseset. That is, we don't want this to change the
+                        // RAP flag of the leaseset.
+                        // When the keyspace rotates at midnight, and this leaseset moves out
+                        // of our keyspace, maybe we shouldn't do this?
+                        // Should we do this whether ff or not?
+                        LeaseSet old = _context.netDb().store(dsm.getKey(), dsm.getLeaseSet());
+                        if (old != null && old.getReceivedAsPublished()
+                            /** && ((FloodfillNetworkDatabaseFacade)_context.netDb()).floodfillEnabled() **/ )
+                            dsm.getLeaseSet().setReceivedAsPublished(true);
+                        if (_log.shouldLog(Log.INFO))
+                            _log.info("Storing LS for: " + dsm.getKey() + " sent to: " + _client);
                     } else {
                         if (_client != null) {
                             // drop it, since the data we receive shouldn't include router