* NetDb: Fix exploration by adding a null hash to mean "don't give me floodfills"
    * PeerSelector: Downgrade floodfills with high fail rate
This commit is contained in:
zzz
2009-12-30 22:36:53 +00:00
parent b2a137c5bc
commit cefc1f130d
3 changed files with 42 additions and 18 deletions

View File

@ -100,7 +100,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1, 0); getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1, 0);
sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel()); sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
} else { } else {
Set routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(), Set<RouterInfo> routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(),
CLOSENESS_THRESHOLD, CLOSENESS_THRESHOLD,
_message.getDontIncludePeers()); _message.getDontIncludePeers());
if (getContext().clientManager().isLocal(ls.getDestination())) { if (getContext().clientManager().isLocal(ls.getDestination())) {
@ -142,13 +142,11 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
} }
} else { } else {
// not found locally - return closest peer routerInfo structs // not found locally - return closest peer routerInfo structs
Set dontInclude = _message.getDontIncludePeers(); Set<Hash> dontInclude = _message.getDontIncludePeers();
// TODO: Honor flag to exclude all floodfills // Honor flag to exclude all floodfills
//if (dontInclude.contains(Hash.FAKE_HASH)) { //if (dontInclude.contains(Hash.FAKE_HASH)) {
// dontInclude = new HashSet(dontInclude); // This is handled in FloodfillPeerSelector
// dontInclude.addAll( pfffft ); Set<RouterInfo> routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(),
//}
Set routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(),
MAX_ROUTERS_RETURNED, MAX_ROUTERS_RETURNED,
dontInclude); dontInclude);
@ -199,7 +197,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
private void sendData(Hash key, DataStructure data, Hash toPeer, TunnelId replyTunnel) { private void sendData(Hash key, DataStructure data, Hash toPeer, TunnelId replyTunnel) {
if (_log.shouldLog(Log.DEBUG)) if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending data matching key key " + key.toBase64() + " to peer " + toPeer.toBase64() _log.debug("Sending data matching key " + key.toBase64() + " to peer " + toPeer.toBase64()
+ " tunnel " + replyTunnel); + " tunnel " + replyTunnel);
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext()); DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(key); msg.setKey(key);
@ -216,7 +214,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
sendMessage(msg, toPeer, replyTunnel); sendMessage(msg, toPeer, replyTunnel);
} }
protected void sendClosest(Hash key, Set routerInfoSet, Hash toPeer, TunnelId replyTunnel) { protected void sendClosest(Hash key, Set<RouterInfo> routerInfoSet, Hash toPeer, TunnelId replyTunnel) {
if (_log.shouldLog(Log.DEBUG)) if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending closest routers to key " + key.toBase64() + ": # peers = " _log.debug("Sending closest routers to key " + key.toBase64() + ": # peers = "
+ routerInfoSet.size() + " tunnel " + replyTunnel); + routerInfoSet.size() + " tunnel " + replyTunnel);

View File

@ -85,12 +85,14 @@ class ExploreJob extends SearchJob {
msg.setReplyTunnel(replyTunnelId); msg.setReplyTunnel(replyTunnelId);
int available = MAX_CLOSEST - msg.getDontIncludePeers().size(); int available = MAX_CLOSEST - msg.getDontIncludePeers().size();
// TODO: add this once ../HTLMJ handles it if (available > 0) {
//if (available > 0) { // Add a flag to say this is an exploration and we don't want floodfills in the responses.
// // add a flag to say this is an exploration and we don't want floodfills in the responses // Doing it this way is of course backwards-compatible.
// if (msg.getDontIncludePeers().add(Hash.FAKE_HASH)) // Supported as of 0.7.9
// available--; if (msg.getDontIncludePeers().add(Hash.FAKE_HASH))
//} available--;
}
KBucketSet ks = _facade.getKBuckets(); KBucketSet ks = _facade.getKBuckets();
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(getState().getTarget()); Hash rkey = getContext().routingKeyGenerator().getRoutingKey(getState().getTarget());
// in a few releases, we can (and should) remove this, // in a few releases, we can (and should) remove this,

View File

@ -98,7 +98,8 @@ class FloodfillPeerSelector extends PeerSelector {
* @param maxNumRouters max to return * @param maxNumRouters max to return
* Sorted by closest to the key if > maxNumRouters, otherwise not * Sorted by closest to the key if > maxNumRouters, otherwise not
* The list is in 3 groups - sorted by routing key within each group. * The list is in 3 groups - sorted by routing key within each group.
* Group 1: No store or lookup failure in a long time * Group 1: No store or lookup failure in a long time, and
* lookup fail rate no more than 1.5 * average
* Group 2: No store or lookup failure in a little while or * Group 2: No store or lookup failure in a little while or
* success newer than failure * success newer than failure
* Group 3: All others * Group 3: All others
@ -126,6 +127,14 @@ class FloodfillPeerSelector extends PeerSelector {
int found = 0; int found = 0;
long now = _context.clock().now(); long now = _context.clock().now();
double maxFailRate;
if (_context.router().getUptime() > 60*60*1000) {
double currentFailRate = _context.statManager().getRate("peer.failedLookupRate").getRate(60*60*1000).getAverageValue();
maxFailRate = Math.max(0.20d, 1.5d * currentFailRate);
} else {
maxFailRate = 100d; // disable
}
// split sorted list into 3 sorted lists // split sorted list into 3 sorted lists
for (int i = 0; found < howMany && i < ffs.size(); i++) { for (int i = 0; found < howMany && i < ffs.size(); i++) {
Hash entry = sorted.first(); Hash entry = sorted.first();
@ -146,7 +155,8 @@ class FloodfillPeerSelector extends PeerSelector {
if (prof != null && prof.getDBHistory() != null if (prof != null && prof.getDBHistory() != null
&& prof.getDbResponseTime().getRate(10*60*1000).getAverageValue() < maxGoodRespTime && prof.getDbResponseTime().getRate(10*60*1000).getAverageValue() < maxGoodRespTime
&& prof.getDBHistory().getLastStoreFailed() < now - NO_FAIL_STORE_GOOD && prof.getDBHistory().getLastStoreFailed() < now - NO_FAIL_STORE_GOOD
&& prof.getDBHistory().getLastLookupFailed() < now - NO_FAIL_LOOKUP_GOOD) { && prof.getDBHistory().getLastLookupFailed() < now - NO_FAIL_LOOKUP_GOOD
&& prof.getDBHistory().getFailedLookupRate().getRate(60*60*1000).getAverageValue() < maxFailRate) {
// good // good
if (_log.shouldLog(Log.DEBUG)) if (_log.shouldLog(Log.DEBUG))
_log.debug("Good: " + entry); _log.debug("Good: " + entry);
@ -246,11 +256,14 @@ class FloodfillPeerSelector extends PeerSelector {
/** /**
* @return list of all with the 'f' mark in their netdb except for shitlisted ones. * @return list of all with the 'f' mark in their netdb except for shitlisted ones.
* Will return non-floodfills only if there aren't enough floodfills.
*
* The list is in 3 groups - unsorted (shuffled) within each group. * The list is in 3 groups - unsorted (shuffled) within each group.
* Group 1: If preferConnected = true, the peers we are directly * Group 1: If preferConnected = true, the peers we are directly
* connected to, that meet the group 2 criteria * connected to, that meet the group 2 criteria
* Group 2: Netdb published less than 3h ago, no bad send in last 30m. * Group 2: Netdb published less than 3h ago, no bad send in last 30m.
* Group 3: All others * Group 3: All others
* Group 4: Non-floodfills, sorted by closest-to-the-key
*/ */
public List<Hash> get(int howMany, boolean preferConnected) { public List<Hash> get(int howMany, boolean preferConnected) {
Collections.shuffle(_floodfillMatches, _context.random()); Collections.shuffle(_floodfillMatches, _context.random());
@ -310,6 +323,8 @@ class FloodfillPeerSelector extends PeerSelector {
/** /**
* Floodfill peers only. Used only by HandleDatabaseLookupMessageJob to populate the DSRM. * Floodfill peers only. Used only by HandleDatabaseLookupMessageJob to populate the DSRM.
* UNLESS peersToIgnore contains Hash.FAKE_HASH (all zeros), in which case this is an exploratory
* lookup, and the response should not include floodfills.
* *
* @param key the original key (NOT the routing key) * @param key the original key (NOT the routing key)
* @return List of Hash for the peers selected, ordered * @return List of Hash for the peers selected, ordered
@ -317,6 +332,15 @@ class FloodfillPeerSelector extends PeerSelector {
@Override @Override
public List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { public List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key); Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
return selectFloodfillParticipants(rkey, maxNumRouters, peersToIgnore, kbuckets); if (peersToIgnore != null && peersToIgnore.contains(Hash.FAKE_HASH)) {
// return non-ff
peersToIgnore.addAll(selectFloodfillParticipants(peersToIgnore, kbuckets));
FloodfillSelectionCollector matches = new FloodfillSelectionCollector(rkey, peersToIgnore, maxNumRouters);
kbuckets.getAll(matches);
return matches.get(maxNumRouters);
} else {
// return ff
return selectFloodfillParticipants(rkey, maxNumRouters, peersToIgnore, kbuckets);
}
} }
} }