diff --git a/history.txt b/history.txt
index e0329e7f7..ab2101172 100644
--- a/history.txt
+++ b/history.txt
@@ -1,6 +1,10 @@
+2008-03-14 zzz
+    * Floodfill Search:
+      - Prefer heard-from, unfailing, unshitlisted floodfill peers
+
 2008-03-14 zzz
     * ProfileOrganizer:
       - Use more recent stats to calculate integration
       - Show that fast peers are also high-capacity on profiles.jsp
     * readme.html: Update Syndie link
     * TunnelPool: Update comments
diff --git a/router/java/src/net/i2p/router/RouterVersion.java b/router/java/src/net/i2p/router/RouterVersion.java
index 353622d4b..6dee5391b 100644
--- a/router/java/src/net/i2p/router/RouterVersion.java
+++ b/router/java/src/net/i2p/router/RouterVersion.java
@@ -17,7 +17,7 @@ import net.i2p.CoreVersion;
 public class RouterVersion {
     public final static String ID = "$Revision: 1.548 $ $Date: 2008-02-10 15:00:00 $";
     public final static String VERSION = "0.6.1.32";
-    public final static long BUILD = 6;
+    public final static long BUILD = 7;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
         System.out.println("Router ID: " + RouterVersion.ID);
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java
index 646f75d0c..d7152eead 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodOnlySearchJob.java
@@ -5,6 +5,7 @@ import net.i2p.router.*;
 import net.i2p.data.Hash;
 import net.i2p.data.i2np.*;
 import net.i2p.util.Log;
+import net.i2p.router.peermanager.PeerProfile;
 
 /**
  * Try sending a search to some floodfill peers, failing completely if we don't get
@@ -75,8 +76,30 @@
         // We need to randomize our ff selection, else we stay with the same ones since
         // getFloodfillPeers() is sorted by closest distance. Always using the same
         // ones didn't help reliability.
-        if (floodfillPeers.size() > CONCURRENT_SEARCHES)
+        // Also, query the unheard-from, unprofiled, failing and shitlisted ones last.
+        // We should hear from floodfills pretty frequently so set a 30m time limit.
+        // If unprofiled we haven't talked to them in a long time.
+        // We aren't contacting the peer directly, so shitlist doesn't strictly matter,
+        // but it's a bad sign, and we often shitlist a peer before we fail it...
+        if (floodfillPeers.size() > CONCURRENT_SEARCHES) {
             Collections.shuffle(floodfillPeers, getContext().random());
+            List ffp = new ArrayList(floodfillPeers.size());
+            int failcount = 0;
+            long before = getContext().clock().now() - 30*60*1000;
+            for (int i = 0; i < floodfillPeers.size(); i++) {
+                Hash peer = (Hash)floodfillPeers.get(i);
+                PeerProfile profile = getContext().profileOrganizer().getProfile(peer);
+                if (profile == null || profile.getLastHeardFrom() < before ||
+                    profile.getIsFailing() || getContext().shitlist().isShitlisted(peer)) {
+                    failcount++;
+                    ffp.add(peer);
+                } else
+                    ffp.add(0, peer);
+            }
+            if (_log.shouldLog(Log.INFO) && failcount > 0)
+                _log.info(getJobId() + ": " + failcount + " of " + floodfillPeers.size() + " floodfills are not heard from, unprofiled, failing or shitlisted");
+            floodfillPeers = ffp;
+        }
 
         int count = 0; // keep a separate count since _lookupsRemaining could be decremented elsewhere
         for (int i = 0; _lookupsRemaining < CONCURRENT_SEARCHES && i < floodfillPeers.size(); i++) {
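
Note: below is a minimal, standalone sketch of the reordering idea in the
FloodOnlySearchJob hunk above, not router code. The class, method, and
String peer labels are hypothetical; the actual patch works on Hash objects
and decides "questionable" via PeerProfile (heard-from within 30 minutes,
not failing) and the shitlist. After the shuffle, questionable peers are
appended to the back of the list and preferred peers pushed to the front,
so the questionable ones are only queried when the preferred ones run out.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.function.Predicate;

    public class PreferredOrderSketch {

        // Reorder a shuffled peer list so preferred peers come first.
        static <T> List<T> preferredFirst(List<T> shuffled, Predicate<T> questionable) {
            List<T> reordered = new ArrayList<T>(shuffled.size());
            for (T peer : shuffled) {
                if (questionable.test(peer))
                    reordered.add(peer);       // questionable: append to the back
                else
                    reordered.add(0, peer);    // preferred: push to the front
            }
            return reordered;
        }

        public static void main(String[] args) {
            // '!' marks a hypothetical questionable peer
            List<String> peers = new ArrayList<String>(Arrays.asList("A", "B!", "C", "D!", "E"));
            Collections.shuffle(peers);
            System.out.println(preferredFirst(peers, p -> p.endsWith("!")));
            // All unmarked peers print before the '!' ones, e.g. [E, C, A, B!, D!]
        }
    }

Two properties of this approach match the patch: inserting preferred peers
at index 0 reverses their relative order, which is harmless because the list
was just shuffled, and add(0, ...) on an ArrayList is O(n) per insert, which
is acceptable for the small floodfill candidate lists involved here.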