- Put closest-to-the-key in explore don't-include-list

- Use facade's peer selector for exploration rather than instantiating a new one
zzz
2009-11-21 13:41:48 +00:00
parent 6144bfb437
commit 647a09b5b9
3 changed files with 29 additions and 14 deletions
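
For context: I2P explores the netDb by sending a DatabaseLookupMessage for a mostly random key, and the message's "don't include" list tells responders which routers not to bother returning. Pre-loading that list with floodfills and with the closest peers we already know forces replies to contain routers that are new to us, which is exactly what this commit wires up. A minimal sketch of the idea, assuming only the accessors visible in the diffs below (the class and helper here are illustrative, not router code):

import java.util.HashSet;
import java.util.Set;

import net.i2p.data.Hash;

// Illustrative sketch, not part of the commit.
class ExploreSketch {
    /**
     * Collect the hashes a responder should NOT send back: floodfills we
     * already know plus the closest peers we already know, so the reply
     * can only contain novel routers. Note that the job deliberately does
     * NOT add our own hash (see the comment in ExploreJob below).
     */
    static Set<Hash> buildDontInclude(Set<Hash> knownFloodfills, Set<Hash> closestKnown) {
        Set<Hash> dontInclude = new HashSet<Hash>(knownFloodfills);
        dontInclude.addAll(closestKnown);
        return dontInclude;
    }
}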

HandleDatabaseLookupMessageJob.java

@@ -142,9 +142,15 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
             }
         } else {
             // not found locally - return closest peer routerInfo structs
+            Set dontInclude = _message.getDontIncludePeers();
+            // TODO: Honor flag to exclude all floodfills
+            //if (dontInclude.contains(Hash.FAKE_HASH)) {
+            //    dontInclude = new HashSet(dontInclude);
+            //    dontInclude.addAll( pfffft );
+            //}
             Set routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(),
                                                                         MAX_ROUTERS_RETURNED,
-                                                                        _message.getDontIncludePeers());
+                                                                        dontInclude);
             // ERR: see above
             // // Remove hidden nodes from set..
@@ -182,7 +188,6 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
     }
     
     private boolean weAreClosest(Set routerInfoSet) {
         boolean weAreClosest = false;
         for (Iterator iter = routerInfoSet.iterator(); iter.hasNext(); ) {
             RouterInfo cur = (RouterInfo)iter.next();
             if (cur.getIdentity().calculateHash().equals(getContext().routerHash())) {

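Spelled out, the TODO above would look something like this once requesters start setting the Hash.FAKE_HASH sentinel. This is a hedged sketch of that future behavior only, not part of the commit; listFloodfillPeers() is a hypothetical name standing in for whatever enumeration the source elides as "pfffft":

// Hypothetical completion of the TODO above; not in this commit.
Set dontInclude = _message.getDontIncludePeers();
if (dontInclude.contains(Hash.FAKE_HASH)) {
    // Copy before adding: the set belongs to the message.
    dontInclude = new HashSet(dontInclude);
    // listFloodfillPeers() is a made-up name for the elided "pfffft".
    dontInclude.addAll(listFloodfillPeers());
}
Set routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(),
                                                            MAX_ROUTERS_RETURNED,
                                                            dontInclude);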
ExploreJob.java

@@ -26,16 +26,12 @@ import net.i2p.util.Log;
  *
  */
 class ExploreJob extends SearchJob {
-    private Log _log;
-    private PeerSelector _peerSelector;
+    private FloodfillPeerSelector _peerSelector;
     
     /** how long each exploration should run for
      * The exploration won't "succeed" so we make it long so we query several peers */
     private static final long MAX_EXPLORE_TIME = 15*1000;
     
-    /** how many of the peers closest to the key being explored do we want to explicitly say "dont send me this"? */
-    private static final int NUM_CLOSEST_TO_IGNORE = 3;
-    
     /** how many peers to explore through concurrently */
     private static final int EXPLORE_BREDTH = 1;
@@ -58,7 +54,7 @@ class ExploreJob extends SearchJob {
         // attempting to send that lease a message!
         super(context, facade, key, null, null, MAX_EXPLORE_TIME, false, false);
         _log = context.logManager().getLog(ExploreJob.class);
-        _peerSelector = new PeerSelector(context);
+        _peerSelector = (FloodfillPeerSelector) (_facade.getPeerSelector());
     }
     
     /**
@@ -89,11 +85,22 @@ class ExploreJob extends SearchJob {
         msg.setReplyTunnel(replyTunnelId);
         
         int available = MAX_CLOSEST - msg.getDontIncludePeers().size();
-        if (available > 0) {
-            List peers = ((FloodfillNetworkDatabaseFacade)_facade).getFloodfillPeers();
+        // TODO: add this once ../HTLMJ handles it
+        //if (available > 0) {
+        //    // add a flag to say this is an exploration and we don't want floodfills in the responses
+        //    if (msg.getDontIncludePeers().add(Hash.FAKE_HASH))
+        //        available--;
+        //}
+        KBucketSet ks = _facade.getKBuckets();
+        Hash rkey = getContext().routingKeyGenerator().getRoutingKey(getState().getTarget());
+        // in a few releases, we can (and should) remove this,
+        // as routers will honor the above flag, and we want the table to include
+        // only non-floodfills.
+        if (available > 0 && ks != null) {
+            List peers = _peerSelector.selectFloodfillParticipants(rkey, available, ks);
             int len = peers.size();
             if (len > 0)
-                msg.getDontIncludePeers().addAll(peers.subList(0, Math.min(available, len)));
+                msg.getDontIncludePeers().addAll(peers);
         }
         
         available = MAX_CLOSEST - msg.getDontIncludePeers().size();
@@ -104,7 +111,7 @@ class ExploreJob extends SearchJob {
             // We're just exploring, but this could give things away, and tie our exploratory tunnels to our router,
             // so let's not put our hash in there.
             Set dontInclude = new HashSet(msg.getDontIncludePeers());
-            List peers = _peerSelector.selectNearestExplicit(getState().getTarget(), available, dontInclude, getFacade().getKBuckets());
+            List peers = _peerSelector.selectNearestExplicit(rkey, available, dontInclude, ks);
             msg.getDontIncludePeers().addAll(peers);
         }
@@ -118,7 +125,7 @@ class ExploreJob extends SearchJob {
     @Override
     protected int getBredth() { return EXPLORE_BREDTH; }
     
     /**
      * We've gotten a search reply that contained the specified
      * number of peers that we didn't know about before.

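Taken together, the new buildMessage() flow is: derive today's routing key for the exploration target, first exclude the floodfills closest to it (the requester already knows them, and they would dominate replies), then exclude the nearest non-floodfill peers it already knows. Restated compactly from the hunks above (raw collection types as in the 2009 code; a paraphrase, not a verbatim excerpt):

// Condensed restatement of the ExploreJob.buildMessage() changes.
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(getState().getTarget());
KBucketSet ks = _facade.getKBuckets();

int available = MAX_CLOSEST - msg.getDontIncludePeers().size();
if (available > 0 && ks != null) {
    // 1) Keep floodfills out of the reply: we already know them.
    List ffs = _peerSelector.selectFloodfillParticipants(rkey, available, ks);
    msg.getDontIncludePeers().addAll(ffs);
}

available = MAX_CLOSEST - msg.getDontIncludePeers().size();
if (available > 0) {
    // 2) Also exclude the closest peers we already know -- but not our
    //    own hash, so the lookup can't be tied back to this router.
    Set dontInclude = new HashSet(msg.getDontIncludePeers());
    msg.getDontIncludePeers().addAll(
        _peerSelector.selectNearestExplicit(rkey, available, dontInclude, ks));
}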
SearchJob.java

@@ -33,9 +33,12 @@ import net.i2p.util.Log;
  * Search for a particular key iteratively until we either find a value or we
  * run out of peers
  *
+ * Note that this is rarely if ever used directly, and is primary used by the ExploreJob extension.
+ * FloodOnlySearchJob and FloodSearchJob do not extend this.
+ * It also does not update peer profile stats.
  */
 class SearchJob extends JobImpl {
-    private Log _log;
+    protected Log _log;
     protected KademliaNetworkDatabaseFacade _facade;
     private SearchState _state;
     private Job _onSuccess;
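
For orientation, the class relationships the new doc comment describes, as a sketch only (bodies and most members omitted, not actual source):

// Sketch of the hierarchy described above.
class SearchJob extends JobImpl {
    protected Log _log;        // now protected so the subclass can reuse it
    protected KademliaNetworkDatabaseFacade _facade;
}

class ExploreJob extends SearchJob {
    // the primary (nearly only) user of SearchJob
}

// FloodOnlySearchJob and FloodSearchJob are independent implementations:
// they do not extend SearchJob.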