* Floodfill Search:
  - Fix a bug that caused a single FloodfillOnlySearchJob
    instance to be run multiple times, with unpredictable
    results
  - Select ff peers randomly to improve reliability
  - Add some bulletproofing
This commit is contained in:
zzz
2008-03-13 20:23:46 +00:00
parent 46307c60d4
commit 4fa4357bf1
4 changed files with 20 additions and 4 deletions

View File

@ -1,3 +1,11 @@
2008-03-14 zzz
    * Floodfill Search:
      - Fix a bug that caused a single FloodfillOnlySearchJob
        instance to be run multiple times, with unpredictable
        results
      - Select ff peers randomly to improve reliability
      - Add some bulletproofing
2008-03-11 zzz
    * ProfileOrganizer:
      - Don't require a peer to be high-capacity to be

View File

@ -17,7 +17,7 @@ import net.i2p.CoreVersion;
public class RouterVersion {
public final static String ID = "$Revision: 1.548 $ $Date: 2008-02-10 15:00:00 $";
public final static String VERSION = "0.6.1.32";
public final static long BUILD = 4;
public final static long BUILD = 5;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
System.out.println("Router ID: " + RouterVersion.ID);

View File

@ -72,6 +72,13 @@ class FloodOnlySearchJob extends FloodSearchJob {
OutNetMessage out = getContext().messageRegistry().registerPending(_replySelector, _onReply, _onTimeout, _timeoutMs);
synchronized (_out) { _out.add(out); }
// We need to randomize our ff selection, else we stay with the same ones since
// getFloodfillPeers() is sorted by closest distance. Always using the same
// ones didn't help reliability.
if (floodfillPeers.size() > CONCURRENT_SEARCHES)
Collections.shuffle(floodfillPeers, getContext().random());
int count = 0; // keep a separate count since _lookupsRemaining could be decremented elsewhere
for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
Hash peer = (Hash)floodfillPeers.get(i);
if (peer.equals(getContext().routerHash()))
@ -92,10 +99,11 @@ class FloodOnlySearchJob extends FloodSearchJob {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " to " + peer.toBase64());
getContext().tunnelDispatcher().dispatchOutbound(dlm, outTunnel.getSendTunnelId(0), peer);
count++;
_lookupsRemaining++;
}
if (_lookupsRemaining <= 0) {
if (count <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Floodfill search for " + _key.toBase64() + " had no peers to send to");
// no floodfill peers, fail
@ -105,7 +113,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
public String getName() { return "NetDb flood search (phase 1)"; }
Hash getKey() { return _key; }
void decrementRemaining() { _lookupsRemaining--; }
void decrementRemaining() { if (_lookupsRemaining > 0) _lookupsRemaining--; }
int getLookupsRemaining() { return _lookupsRemaining; }
void failed() {

View File

@ -146,7 +146,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
SearchJob search(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs, boolean isLease) {
//if (true) return super.search(key, onFindJob, onFailedLookupJob, timeoutMs, isLease);
if (key == null) throw new IllegalArgumentException("searchin for nothin, eh?");
boolean isNew = true;
boolean isNew = false;
FloodSearchJob searchJob = null;
synchronized (_activeFloodQueries) {
searchJob = (FloodSearchJob)_activeFloodQueries.get(key);