diff --git a/history.txt b/history.txt
index e6a17e5854..a80182f681 100644
--- a/history.txt
+++ b/history.txt
@@ -1,3 +1,8 @@
+2012-11-19 zzz
+ * BuildHandler: Disable CoDel, wasn't helping
+ * NetDB: Add negative lookup cache
+ * Profiles: Split up files into subdirectories
+
 2012-11-17 zzz
  * error500.jsp: Add servlet version
  * i2psnark: Clear PEX peers set after use, cause of bad peer counts
diff --git a/router/java/src/net/i2p/router/RouterVersion.java b/router/java/src/net/i2p/router/RouterVersion.java
index a6204817e3..f2522cd4ed 100644
--- a/router/java/src/net/i2p/router/RouterVersion.java
+++ b/router/java/src/net/i2p/router/RouterVersion.java
@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 8;
+    public final static long BUILD = 9;
 
     /** for example "-test" */
     public final static String EXTRA = "";
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodThrottler.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodThrottler.java
index 96b0f508d7..2fced02c6b 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodThrottler.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodThrottler.java
@@ -12,7 +12,7 @@ import net.i2p.util.SimpleTimer;
  * @since 0.7.11
  */
 class FloodThrottler {
-    private ObjectCounter counter;
+    private final ObjectCounter counter;
     private static final int MAX_FLOODS = 3;
     private static final long CLEAN_TIME = 60*1000;
 
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java
index 3c7e0e02d0..8496e9a7f0 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java
@@ -33,6 +33,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
     private final Set _verifiesInProgress;
     private FloodThrottler _floodThrottler;
     private LookupThrottler _lookupThrottler;
+    private NegativeLookupCache _negativeCache;
 
     /**
      * This is the flood redundancy. Entries are
@@ -62,6 +63,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
         _context.statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
         _context.statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
         _context.statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        _context.statManager().createRateStat("netDb.negativeCache", "Aborted lookup, already cached", "NetworkDatabase", new long[] { 60*60*1000l });
     }
 
     @Override
@@ -69,6 +71,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
         super.startup();
         _context.jobQueue().addJob(new FloodfillMonitorJob(_context, this));
         _lookupThrottler = new LookupThrottler();
+        _negativeCache = new NegativeLookupCache();
 
         // refresh old routers
         Job rrj = new RefreshRoutersJob(_context, this);
@@ -166,6 +169,25 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
         return _lookupThrottler.shouldThrottle(from, id);
     }
 
+    /**
+     * Increment in the negative lookup cache
+     * @since 0.9.4
+     */
+    void lookupFailed(Hash key) {
+        _negativeCache.lookupFailed(key);
+    }
+
+    /**
+     * Is the key in the negative lookup cache?
+     * @since 0.9.4
+     */
+    boolean isNegativeCached(Hash key) {
+        boolean rv = _negativeCache.isCached(key);
+        if (rv)
+            _context.statManager().addRateData("netDb.negativeCache", 1);
+        return rv;
+    }
+
     /**
      * Send to a subset of all floodfill peers.
      * We do this to implement Kademlia within the floodfills, i.e.
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java
index 2565ab13bf..a27c9a5bae 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/IterativeSearchJob.java
@@ -14,6 +14,7 @@ import net.i2p.data.DataHelper;
 import net.i2p.data.Hash;
 import net.i2p.data.RouterInfo;
 import net.i2p.data.i2np.DatabaseLookupMessage;
+import net.i2p.router.CommSystemFacade;
 import net.i2p.router.Job;
 import net.i2p.router.MessageSelector;
 import net.i2p.router.OutNetMessage;
@@ -93,6 +94,12 @@ class IterativeSearchJob extends FloodSearchJob {
 
     @Override
     public void runJob() {
+        if (_facade.isNegativeCached(_key)) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Negative cached, not searching: " + _key);
+            failed();
+            return;
+        }
         // pick some floodfill peers and send out the searches
         List floodfillPeers;
         KBucketSet ks = _facade.getKBuckets();
@@ -304,6 +311,8 @@ class IterativeSearchJob extends FloodSearchJob {
             _dead = true;
         }
         _facade.complete(_key);
+        if (getContext().commSystem().getReachabilityStatus() != CommSystemFacade.STATUS_DISCONNECTED)
+            _facade.lookupFailed(_key);
         getContext().messageRegistry().unregisterPending(_out);
         int tries;
         synchronized(this) {
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/LookupThrottler.java b/router/java/src/net/i2p/router/networkdb/kademlia/LookupThrottler.java
index 3c82810e1f..12feea376a 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/LookupThrottler.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/LookupThrottler.java
@@ -16,7 +16,7 @@ import net.i2p.util.SimpleTimer;
  * @since 0.7.11
 */
 class LookupThrottler {
-    private ObjectCounter counter;
+    private final ObjectCounter counter;
     /** the id of this is -1 */
     private static final TunnelId DUMMY_ID = new TunnelId();
     /** this seems like plenty */
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/NegativeLookupCache.java b/router/java/src/net/i2p/router/networkdb/kademlia/NegativeLookupCache.java
new file mode 100644
index 0000000000..c967503085
--- /dev/null
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/NegativeLookupCache.java
@@ -0,0 +1,36 @@
+package net.i2p.router.networkdb.kademlia;
+
+import net.i2p.data.Hash;
+import net.i2p.util.ObjectCounter;
+import net.i2p.util.SimpleScheduler;
+import net.i2p.util.SimpleTimer;
+
+/**
+ * Track lookup fails
+ *
+ * @since 0.9.4
+ */
+class NegativeLookupCache {
+    private final ObjectCounter counter;
+    private static final int MAX_FAILS = 3;
+    private static final long CLEAN_TIME = 4*60*1000;
+
+    public NegativeLookupCache() {
+        this.counter = new ObjectCounter();
+        SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
+    }
+
+    public void lookupFailed(Hash h) {
+        this.counter.increment(h);
+    }
+
+    public boolean isCached(Hash h) {
+        return this.counter.count(h) >= MAX_FAILS;
+    }
+
+    private class Cleaner implements SimpleTimer.TimedEvent {
+        public void timeReached() {
+            NegativeLookupCache.this.counter.clear();
+        }
+    }
+}
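
For readers following the change: the new NegativeLookupCache is a simple count-and-reset structure. Each failed lookup increments a per-Hash counter, a key is reported as negative-cached once it has failed MAX_FAILS (3) times, and every count is discarded every CLEAN_TIME (4 minutes), so a key becomes searchable again after the next cleaning pass. IterativeSearchJob consults isNegativeCached() before starting a search and records failures only when the router is not disconnected. The sketch below shows the same pattern in plain standalone Java; the class and method names are illustrative only, not the router's API, which uses ObjectCounter and SimpleScheduler as shown in the patch.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative sketch only: counts failures per key, reports a key as
// "negative cached" after MAX_FAILS failures, and clears all counts on a
// fixed interval so cached keys can be retried later.
class NegativeCacheSketch<K> {
    private static final int MAX_FAILS = 3;
    private static final long CLEAN_TIME_MS = 4 * 60 * 1000;

    private final ConcurrentHashMap<K, AtomicInteger> fails = new ConcurrentHashMap<>();

    NegativeCacheSketch(ScheduledExecutorService scheduler) {
        // plays the role of SimpleScheduler + Cleaner in the patch above
        scheduler.scheduleAtFixedRate(fails::clear, CLEAN_TIME_MS, CLEAN_TIME_MS, TimeUnit.MILLISECONDS);
    }

    void lookupFailed(K key) {
        fails.computeIfAbsent(key, k -> new AtomicInteger()).incrementAndGet();
    }

    boolean isCached(K key) {
        AtomicInteger count = fails.get(key);
        return count != null && count.get() >= MAX_FAILS;
    }

    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        NegativeCacheSketch<String> cache = new NegativeCacheSketch<>(scheduler);
        for (int i = 0; i < 3; i++)
            cache.lookupFailed("some-key");
        System.out.println(cache.isCached("some-key"));   // true after 3 failures
        System.out.println(cache.isCached("other-key"));  // false
        scheduler.shutdown();
    }
}

The periodic wipe-everything approach trades precision for cheapness: no per-entry timestamps are kept, so a key that failed just before a cleaning pass is retried sooner than one that failed just after, which is acceptable for suppressing repeated lookups of unresolvable keys.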