diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/HTMLPreviewRenderer.java b/apps/syndie/java/src/net/i2p/syndie/sml/HTMLPreviewRenderer.java
index 64d8d3bab..8a9ad82fd 100644
--- a/apps/syndie/java/src/net/i2p/syndie/sml/HTMLPreviewRenderer.java
+++ b/apps/syndie/java/src/net/i2p/syndie/sml/HTMLPreviewRenderer.java
@@ -103,6 +103,18 @@ public class HTMLPreviewRenderer extends HTMLRenderer {
_postBodyBuffer.append("
\n");
}
+ if (_archives.size() > 0) {
+ _postBodyBuffer.append("Archives:");
+ for (int i = 0; i < _archives.size(); i++) {
+ ArchiveRef a = (ArchiveRef)_archives.get(i);
+ _postBodyBuffer.append(" ").append(sanitizeString(a.name)).append("");
+ if (a.description != null)
+ _postBodyBuffer.append(": ").append(sanitizeString(a.description));
+ }
+ _postBodyBuffer.append("
\n");
+ }
+
_postBodyBuffer.append("\n\n\n");
_postBodyBuffer.append("\n");
}
diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/HTMLRenderer.java b/apps/syndie/java/src/net/i2p/syndie/sml/HTMLRenderer.java
index 1c0606007..b527108bc 100644
--- a/apps/syndie/java/src/net/i2p/syndie/sml/HTMLRenderer.java
+++ b/apps/syndie/java/src/net/i2p/syndie/sml/HTMLRenderer.java
@@ -26,6 +26,7 @@ public class HTMLRenderer extends EventReceiverImpl {
protected List _addresses;
protected List _links;
protected List _blogs;
+ protected List _archives;
protected StringBuffer _preBodyBuffer;
protected StringBuffer _bodyBuffer;
protected StringBuffer _postBodyBuffer;
@@ -93,6 +94,7 @@ public class HTMLRenderer extends EventReceiverImpl {
_addresses = new ArrayList();
_links = new ArrayList();
_blogs = new ArrayList();
+ _archives = new ArrayList();
_cutBody = cutBody;
_showImages = showImages;
_cutReached = false;
@@ -261,9 +263,6 @@ public class HTMLRenderer extends EventReceiverImpl {
*
*/
public void receiveBlog(String name, String hash, String tag, long entryId, List locations, String description) {
- if (!continueBody()) { return; }
- if (hash == null) return;
-
System.out.println("Receiving the blog: " + name + "/" + hash + "/" + tag + "/" + entryId +"/" + locations + ": "+ description);
byte blogData[] = Base64.decode(hash);
if ( (blogData == null) || (blogData.length != Hash.HASH_LENGTH) )
@@ -278,6 +277,9 @@ public class HTMLRenderer extends EventReceiverImpl {
if (!_blogs.contains(b))
_blogs.add(b);
+ if (!continueBody()) { return; }
+ if (hash == null) return;
+
Hash blog = new Hash(blogData);
if (entryId > 0) {
String pageURL = getPageURL(blog, tag, entryId, -1, -1, true, (_user != null ? _user.getShowImages() : false));
@@ -319,6 +321,45 @@ public class HTMLRenderer extends EventReceiverImpl {
_bodyBuffer.append("] ");
}
+ protected static class ArchiveRef {
+ public String name;
+ public String description;
+ public String locationSchema;
+ public String location;
+ public int hashCode() { return -1; }
+ public boolean equals(Object o) {
+ ArchiveRef a = (ArchiveRef)o;
+ return DataHelper.eq(name, a.name) && DataHelper.eq(description, a.description)
+ && DataHelper.eq(locationSchema, a.locationSchema)
+ && DataHelper.eq(location, a.location);
+ }
+ }
+ public void receiveArchive(String name, String description, String locationSchema, String location,
+ String postingKey, String anchorText) {
+ ArchiveRef a = new ArchiveRef();
+ a.name = name;
+ a.description = description;
+ a.locationSchema = locationSchema;
+ a.location = location;
+ if (!_archives.contains(a))
+ _archives.add(a);
+
+ if (!continueBody()) { return; }
+
+ _bodyBuffer.append(sanitizeString(anchorText)).append(" [Archive ");
+ if (name != null)
+ _bodyBuffer.append(sanitizeString(name));
+ if (location != null) {
+ _bodyBuffer.append(" at ");
+ SafeURL surl = new SafeURL(locationSchema + "://" + location);
+ _bodyBuffer.append("").append(sanitizeString(surl.toString())).append("");
+ }
+ if (description != null)
+ _bodyBuffer.append(": ").append(sanitizeString(description));
+ _bodyBuffer.append("]");
+ }
+
protected static class Link {
public String schema;
public String location;
@@ -414,6 +455,12 @@ public class HTMLRenderer extends EventReceiverImpl {
else if (addrs > 1)
_postBodyBuffer.append(addrs).append(" addresses ");
+ int archives = _archives.size();
+ if (archives == 1)
+ _postBodyBuffer.append("1 archive ");
+ else if (archives > 1)
+ _postBodyBuffer.append(archives).append(" archives ");
+
if (_entry != null) {
List replies = _archive.getIndex().getReplies(_entry.getURI());
if ( (replies != null) && (replies.size() > 0) ) {
@@ -490,10 +537,10 @@ public class HTMLRenderer extends EventReceiverImpl {
}
if (_addresses.size() > 0) {
- _postBodyBuffer.append("Addresses: ");
+ _postBodyBuffer.append("Addresses:");
for (int i = 0; i < _addresses.size(); i++) {
Address a = (Address)_addresses.get(i);
- _postBodyBuffer.append("\n");
}
-
+ if (_archives.size() > 0) {
+ _postBodyBuffer.append("Archives:");
+ for (int i = 0; i < _archives.size(); i++) {
+ ArchiveRef a = (ArchiveRef)_archives.get(i);
+ _postBodyBuffer.append(" ").append(sanitizeString(a.name)).append("");
+ if (a.description != null)
+ _postBodyBuffer.append(": ").append(sanitizeString(a.description));
+ }
+ _postBodyBuffer.append("
\n");
+ }
+
if (_entry != null) {
List replies = _archive.getIndex().getReplies(_entry.getURI());
if ( (replies != null) && (replies.size() > 0) ) {
diff --git a/apps/syndie/java/src/net/i2p/syndie/sml/SMLParser.java b/apps/syndie/java/src/net/i2p/syndie/sml/SMLParser.java
index bd438b857..1560fb276 100644
--- a/apps/syndie/java/src/net/i2p/syndie/sml/SMLParser.java
+++ b/apps/syndie/java/src/net/i2p/syndie/sml/SMLParser.java
@@ -196,6 +196,7 @@ public class SMLParser {
private static final String T_HR = "hr";
private static final String T_PRE = "pre";
private static final String T_ATTACHMENT = "attachment";
+ private static final String T_ARCHIVE = "archive";
private static final String P_ATTACHMENT = "attachment";
private static final String P_WHO_QUOTED = "author";
@@ -211,6 +212,11 @@ public class SMLParser {
private static final String P_ADDRESS_LOCATION = "location";
private static final String P_ADDRESS_SCHEMA = "schema";
private static final String P_ATTACHMENT_ID = "id";
+ private static final String P_ARCHIVE_NAME = "name";
+ private static final String P_ARCHIVE_DESCRIPTION = "description";
+ private static final String P_ARCHIVE_LOCATION_SCHEMA = "schema";
+ private static final String P_ARCHIVE_LOCATION = "location";
+ private static final String P_ARCHIVE_POSTING_KEY = "postingkey";
private void parseTag(String tagName, Map attr, String body, EventReceiver receiver) {
tagName = tagName.toLowerCase();
@@ -241,6 +247,10 @@ public class SMLParser {
}
receiver.receiveBlog(getString(P_BLOG_NAME, attr), getString(P_BLOG_HASH, attr), getString(P_BLOG_TAG, attr),
getLong(P_BLOG_ENTRY, attr), locations, body);
+ } else if (T_ARCHIVE.equals(tagName)) {
+ receiver.receiveArchive(getString(P_ARCHIVE_NAME, attr), getString(P_ARCHIVE_DESCRIPTION, attr),
+ getString(P_ARCHIVE_LOCATION_SCHEMA, attr), getString(P_ARCHIVE_LOCATION, attr),
+ getString(P_ARCHIVE_POSTING_KEY, attr), body);
} else if (T_LINK.equals(tagName)) {
receiver.receiveLink(getString(P_LINK_SCHEMA, attr), getString(P_LINK_LOCATION, attr), body);
} else if (T_ADDRESS.equals(tagName)) {
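
Example (illustrative only, using the attribute names defined above). Given SML such as

    [archive name="syndiemedia" description="they have good stuff" schema="eep"
             location="http://syndiemedia.i2p/archive/archive.txt"]sync up![/archive]

parseTag() dispatches:

    receiver.receiveArchive("syndiemedia", "they have good stuff", "eep",
                            "http://syndiemedia.i2p/archive/archive.txt",
                            null, "sync up!");

where the null postingKey assumes getString() returns null for an absent attribute,
as the other tag handlers already rely on.
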
diff --git a/apps/syndie/jsp/post.jsp b/apps/syndie/jsp/post.jsp
index a7a78cc4b..f87df4d63 100644
--- a/apps/syndie/jsp/post.jsp
+++ b/apps/syndie/jsp/post.jsp
@@ -97,6 +97,7 @@ Post content (in raw SML, no headers):
* [blog name="name" bloghash="base64hash" blogtag="tag"]description[/blog] = link to all posts in the blog with the specified tag
* [blog name="name" blogtag="tag"]description[/blog] = link to all posts in all blogs with the specified tag
* [link schema="eep" location="http://forum.i2p"]text[/link] = offer a link to an external resource (accessible with the given schema)
+* [archive name="name" description="they have good stuff" schema="eep" location="http://syndiemedia.i2p/archive/archive.txt"]foo![/archive] = offer an easy way to sync up with a new Syndie archive
SML headers are newline delimited key=value pairs. Example keys are:
* bgcolor = background color of the post (e.g. bgcolor=#ffccaa or bgcolor=red)
diff --git a/core/java/src/net/i2p/data/RouterInfo.java b/core/java/src/net/i2p/data/RouterInfo.java
index aa2bcc1ce..d27434710 100644
--- a/core/java/src/net/i2p/data/RouterInfo.java
+++ b/core/java/src/net/i2p/data/RouterInfo.java
@@ -315,6 +315,17 @@ public class RouterInfo extends DataStructureImpl {
else
return "";
}
+
+ public void addCapability(char cap) {
+ if (_options == null) _options = new OrderedProperties();
+ synchronized (_options) {
+ String caps = _options.getProperty(PROP_CAPABILITIES);
+ if (caps == null)
+ _options.setProperty(PROP_CAPABILITIES, ""+cap);
+ else if (caps.indexOf(cap) == -1)
+ _options.setProperty(PROP_CAPABILITIES, caps + cap);
+ }
+ }
/**
* Get the routing key for the structure using the current modifier in the RoutingKeyGenerator.
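
Usage sketch (illustrative, mirroring the callers added later in this patch; ctx is a
RouterContext and ri an as-yet-unsigned RouterInfo):

    // advertise the floodfill flag only when the router opts in; addCapability()
    // appends the character to the "caps" option only if it is not already there,
    // so repeating this on every RouterInfo rebuild is harmless
    if (FloodfillNetworkDatabaseFacade.floodfillEnabled(ctx))
        ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL); // 'f'

Note that the callers all do this before the RouterInfo is signed, since it mutates
the published options.
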
diff --git a/history.txt b/history.txt
index da1e13709..98ddfe06a 100644
--- a/history.txt
+++ b/history.txt
@@ -1,4 +1,7 @@
-$Id: history.txt,v 1.230 2005/08/24 17:55:27 jrandom Exp $
+$Id: history.txt,v 1.231 2005/08/27 17:15:38 jrandom Exp $
+
+2005-08-29 jrandom
+ * Added the new test Floodfill netDb
2005-08-27 jrandom
* Minor logging and optimization tweaks in the router and SDK
diff --git a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java
index 6d5a35a53..7d591800e 100644
--- a/router/java/src/net/i2p/router/NetworkDatabaseFacade.java
+++ b/router/java/src/net/i2p/router/NetworkDatabaseFacade.java
@@ -59,6 +59,7 @@ public abstract class NetworkDatabaseFacade implements Service {
public abstract void fail(Hash dbEntry);
public int getKnownRouters() { return 0; }
+ public int getKnownLeaseSets() { return 0; }
}
diff --git a/router/java/src/net/i2p/router/Router.java b/router/java/src/net/i2p/router/Router.java
index 8d3b2da3a..a3749ec83 100644
--- a/router/java/src/net/i2p/router/Router.java
+++ b/router/java/src/net/i2p/router/Router.java
@@ -35,6 +35,7 @@ import net.i2p.data.i2np.GarlicMessage;
//import net.i2p.data.i2np.TunnelMessage;
import net.i2p.router.message.GarlicMessageHandler;
//import net.i2p.router.message.TunnelMessageHandler;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.router.startup.StartupJob;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
@@ -291,6 +292,8 @@ public class Router {
stats.setProperty(RouterInfo.PROP_NETWORK_ID, NETWORK_ID+"");
ri.setOptions(stats);
ri.setAddresses(_context.commSystem().createAddresses());
+ if (FloodfillNetworkDatabaseFacade.floodfillEnabled(_context))
+ ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
SigningPrivateKey key = _context.keyManager().getSigningPrivateKey();
if (key == null) {
_log.log(Log.CRIT, "Internal error - signing private key not known? wtf");
diff --git a/router/java/src/net/i2p/router/RouterContext.java b/router/java/src/net/i2p/router/RouterContext.java
index 197eead2b..a5437d959 100644
--- a/router/java/src/net/i2p/router/RouterContext.java
+++ b/router/java/src/net/i2p/router/RouterContext.java
@@ -9,6 +9,7 @@ import net.i2p.data.Hash;
import net.i2p.router.admin.AdminManager;
import net.i2p.router.client.ClientManagerFacadeImpl;
import net.i2p.router.networkdb.kademlia.KademliaNetworkDatabaseFacade;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.router.peermanager.Calculator;
import net.i2p.router.peermanager.CapacityCalculator;
import net.i2p.router.peermanager.IntegrationCalculator;
@@ -97,7 +98,7 @@ public class RouterContext extends I2PAppContext {
_messageHistory = new MessageHistory(this);
_messageRegistry = new OutboundMessageRegistry(this);
_messageStateMonitor = new MessageStateMonitor(this);
- _netDb = new KademliaNetworkDatabaseFacade(this);
+ _netDb = new FloodfillNetworkDatabaseFacade(this); // new KademliaNetworkDatabaseFacade(this);
_keyManager = new KeyManager(this);
if ("false".equals(getProperty("i2p.vmCommSystem", "false")))
_commSystem = new CommSystemFacadeImpl(this);
diff --git a/router/java/src/net/i2p/router/RouterVersion.java b/router/java/src/net/i2p/router/RouterVersion.java
index d4211e251..5bce0ed0a 100644
--- a/router/java/src/net/i2p/router/RouterVersion.java
+++ b/router/java/src/net/i2p/router/RouterVersion.java
@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
- public final static String ID = "$Revision: 1.219 $ $Date: 2005/08/24 17:55:26 $";
+ public final static String ID = "$Revision: 1.220 $ $Date: 2005/08/27 17:15:38 $";
public final static String VERSION = "0.6.0.3";
- public final static long BUILD = 3;
+ public final static long BUILD = 4;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION);
System.out.println("Router ID: " + RouterVersion.ID);
diff --git a/router/java/src/net/i2p/router/StatisticsManager.java b/router/java/src/net/i2p/router/StatisticsManager.java
index ccbce2b83..64d27d74f 100644
--- a/router/java/src/net/i2p/router/StatisticsManager.java
+++ b/router/java/src/net/i2p/router/StatisticsManager.java
@@ -19,6 +19,7 @@ import net.i2p.data.DataHelper;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
/**
* Maintain the statistics about the router
@@ -147,6 +148,12 @@ public class StatisticsManager implements Service {
//includeRate("stream.con.receiveDuplicateSize", stats, new long[] { 60*60*1000 });
stats.setProperty("stat_uptime", DataHelper.formatDuration(_context.router().getUptime()));
stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");
+
+ if (FloodfillNetworkDatabaseFacade.isFloodfill(_context.router().getRouterInfo())) {
+ stats.setProperty("netdb.knownRouters", ""+_context.netDb().getKnownRouters());
+ stats.setProperty("netdb.knownLeaseSets", ""+_context.netDb().getKnownLeaseSets());
+ }
+
_log.debug("Publishing peer rankings");
} else {
_log.debug("Not publishing peer rankings");
diff --git a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java
index 7881c0a28..e4b17e84b 100644
--- a/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java
+++ b/router/java/src/net/i2p/router/networkdb/HandleDatabaseLookupMessageJob.java
@@ -59,6 +59,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
_fromHash = fromHash;
}
+ protected boolean answerAllQueries() { return false; }
+
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Handling database lookup message for " + _message.getSearchKey());
@@ -75,7 +77,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
if (ls != null) {
// only answer a request for a LeaseSet if it has been published
// to us, or, if its local, if we would have published to ourselves
- if (ls.getReceivedAsPublished()) {
+ if (answerAllQueries() || ls.getReceivedAsPublished()) {
getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1, 0);
sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
} else {
diff --git a/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java b/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java
index 2656cc866..41752cfd6 100644
--- a/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java
+++ b/router/java/src/net/i2p/router/networkdb/PublishLocalRouterInfoJob.java
@@ -17,6 +17,7 @@ import net.i2p.data.SigningPrivateKey;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.Router;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.util.Log;
/**
@@ -44,6 +45,8 @@ public class PublishLocalRouterInfoJob extends JobImpl {
ri.setPublished(getContext().clock().now());
ri.setOptions(stats);
ri.setAddresses(getContext().commSystem().createAddresses());
+ if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()))
+ ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
SigningPrivateKey key = getContext().keyManager().getSigningPrivateKey();
if (key == null) {
_log.log(Log.CRIT, "Internal error - signing private key not known? rescheduling publish for 30s");
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/DataPublisherJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/DataPublisherJob.java
index 293e29133..207bf6724 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/DataPublisherJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/DataPublisherJob.java
@@ -55,8 +55,9 @@ class DataPublisherJob extends JobImpl {
new Exception("Publish expired lease?"));
}
}
- StoreJob store = new StoreJob(getContext(), _facade, key, data, null, null, STORE_TIMEOUT);
- getContext().jobQueue().addJob(store);
+ _facade.sendStore(key, data, null, null, STORE_TIMEOUT, null);
+ //StoreJob store = new StoreJob(getContext(), _facade, key, data, null, null, STORE_TIMEOUT);
+ //getContext().jobQueue().addJob(store);
}
requeue(RERUN_DELAY_MS);
}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillDatabaseLookupMessageHandler.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillDatabaseLookupMessageHandler.java
new file mode 100644
index 000000000..6ae7c9921
--- /dev/null
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillDatabaseLookupMessageHandler.java
@@ -0,0 +1,46 @@
+package net.i2p.router.networkdb.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might. Use at your own risk.
+ *
+ */
+
+import net.i2p.data.Hash;
+import net.i2p.data.RouterIdentity;
+import net.i2p.data.i2np.DatabaseLookupMessage;
+import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.router.HandlerJobBuilder;
+import net.i2p.router.Job;
+import net.i2p.router.RouterContext;
+import net.i2p.util.Log;
+
+/**
+ * Build a HandleFloodfillDatabaseLookupMessageJob whenever a DatabaseLookupMessage arrives
+ *
+ */
+public class FloodfillDatabaseLookupMessageHandler implements HandlerJobBuilder {
+ private RouterContext _context;
+ private Log _log;
+ public FloodfillDatabaseLookupMessageHandler(RouterContext context) {
+ _context = context;
+ _log = context.logManager().getLog(FloodfillDatabaseLookupMessageHandler.class);
+ _context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ _context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ }
+
+ public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
+ _context.statManager().addRateData("netDb.lookupsReceived", 1, 0);
+
+ if (true || _context.throttle().acceptNetDbLookupRequest(((DatabaseLookupMessage)receivedMessage).getSearchKey())) {
+ return new HandleFloodfillDatabaseLookupMessageJob(_context, (DatabaseLookupMessage)receivedMessage, from, fromHash);
+ } else {
+ if (_log.shouldLog(Log.INFO))
+ _log.info("Dropping lookup request as throttled");
+ _context.statManager().addRateData("netDb.lookupsDropped", 1, 1);
+ return null;
+ }
+ }
+}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillDatabaseStoreMessageHandler.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillDatabaseStoreMessageHandler.java
new file mode 100644
index 000000000..f0f1cc24a
--- /dev/null
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillDatabaseStoreMessageHandler.java
@@ -0,0 +1,34 @@
+package net.i2p.router.networkdb.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might. Use at your own risk.
+ *
+ */
+
+import net.i2p.data.Hash;
+import net.i2p.data.RouterIdentity;
+import net.i2p.data.i2np.DatabaseStoreMessage;
+import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.router.HandlerJobBuilder;
+import net.i2p.router.Job;
+import net.i2p.router.RouterContext;
+
+/**
+ * Create a HandleFloodfillDatabaseStoreMessageJob whenever a DatabaseStoreMessage arrives
+ *
+ */
+public class FloodfillDatabaseStoreMessageHandler implements HandlerJobBuilder {
+ private RouterContext _context;
+ private FloodfillNetworkDatabaseFacade _facade;
+
+ public FloodfillDatabaseStoreMessageHandler(RouterContext context, FloodfillNetworkDatabaseFacade facade) {
+ _context = context;
+ _facade = facade;
+ }
+ public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
+ return new HandleFloodfillDatabaseStoreMessageJob(_context, (DatabaseStoreMessage)receivedMessage, from, fromHash, _facade);
+ }
+}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java
new file mode 100644
index 000000000..553b4c799
--- /dev/null
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java
@@ -0,0 +1,97 @@
+package net.i2p.router.networkdb.kademlia;
+
+import java.util.*;
+import net.i2p.router.*;
+import net.i2p.router.networkdb.DatabaseStoreMessageHandler;
+import net.i2p.data.i2np.*;
+import net.i2p.data.*;
+import net.i2p.util.Log;
+
+/**
+ * Kademlia netDb facade extended with floodfill participation and flooding support.
+ */
+public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
+ public static final char CAPACITY_FLOODFILL = 'f';
+ private static final String PROP_FLOODFILL_PARTICIPANT = "router.floodfillParticipant";
+ private static final String DEFAULT_FLOODFILL_PARTICIPANT = "false";
+
+ public FloodfillNetworkDatabaseFacade(RouterContext context) {
+ super(context);
+ }
+
+ protected void createHandlers() {
+ _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context));
+ _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseStoreMessage.MESSAGE_TYPE, new FloodfillDatabaseStoreMessageHandler(_context, this));
+ }
+
+ public void sendStore(Hash key, DataStructure ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
+ // if we are a part of the floodfill netDb, don't send out our own leaseSets as part
+ // of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
+ // perhaps statistically adjust this so we are the source every 1/N times... or something.
+ if (floodfillEnabled() && (ds instanceof RouterInfo)) {
+ flood(ds);
+ if (onSuccess != null)
+ _context.jobQueue().addJob(onSuccess);
+ } else {
+ _context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
+ }
+ }
+
+ public void flood(DataStructure ds) {
+ FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector();
+ List peers = sel.selectFloodfillParticipants(getKBuckets());
+ int flooded = 0;
+ for (int i = 0; i < peers.size(); i++) {
+ Hash peer = (Hash)peers.get(i);
+ RouterInfo target = lookupRouterInfoLocally(peer);
+ if ( (target == null) || (_context.shitlist().isShitlisted(peer)) )
+ continue;
+ if (peer.equals(_context.routerHash()))
+ continue;
+ DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
+ if (ds instanceof LeaseSet) {
+ msg.setKey(((LeaseSet)ds).getDestination().calculateHash());
+ msg.setLeaseSet((LeaseSet)ds);
+ } else {
+ msg.setKey(((RouterInfo)ds).getIdentity().calculateHash());
+ msg.setRouterInfo((RouterInfo)ds);
+ }
+ msg.setReplyGateway(null);
+ msg.setReplyToken(0);
+ msg.setReplyTunnel(null);
+ OutNetMessage m = new OutNetMessage(_context);
+ m.setMessage(msg);
+ m.setOnFailedReplyJob(null);
+ m.setPriority(FLOOD_PRIORITY);
+ m.setTarget(target);
+ m.setExpiration(_context.clock().now()+FLOOD_TIMEOUT);
+ _context.commSystem().processMessage(m);
+ flooded++;
+ if (_log.shouldLog(Log.INFO))
+ _log.info("Flooding the entry for " + msg.getKey().toBase64() + " to " + peer.toBase64());
+ }
+
+ if (_log.shouldLog(Log.INFO))
+ _log.info("Flooded the to " + flooded + " peers");
+ }
+
+ private static final int FLOOD_PRIORITY = 200;
+ private static final int FLOOD_TIMEOUT = 30*1000;
+
+ protected PeerSelector createPeerSelector() { return new FloodfillPeerSelector(_context); }
+
+ public boolean floodfillEnabled() { return floodfillEnabled(_context); }
+ public static boolean floodfillEnabled(RouterContext ctx) {
+ String enabled = ctx.getProperty(PROP_FLOODFILL_PARTICIPANT, DEFAULT_FLOODFILL_PARTICIPANT);
+ return "true".equals(enabled);
+ }
+
+ public static boolean isFloodfill(RouterInfo peer) {
+ if (peer == null) return false;
+ String caps = peer.getCapabilities();
+ if ( (caps != null) && (caps.indexOf(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL) != -1) )
+ return true;
+ else
+ return false;
+ }
+}
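
Participation is opt-in. With router.floodfillParticipant unset or "false", a router
still runs this facade (per the RouterContext change above) and sends its stores to
floodfill peers, but never advertises the 'f' capability or answers as one. A sketch
of the router.config line to opt in:

    # opt this router into the test floodfill pool (default: false)
    router.floodfillParticipant=true
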
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java
new file mode 100644
index 000000000..965421fda
--- /dev/null
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillPeerSelector.java
@@ -0,0 +1,105 @@
+package net.i2p.router.networkdb.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might. Use at your own risk.
+ *
+ */
+
+import java.math.BigInteger;
+import java.util.*;
+
+import net.i2p.data.DataHelper;
+import net.i2p.data.Hash;
+import net.i2p.data.RouterInfo;
+import net.i2p.router.RouterContext;
+import net.i2p.router.peermanager.PeerProfile;
+import net.i2p.stat.Rate;
+import net.i2p.stat.RateStat;
+import net.i2p.util.Log;
+
+class FloodfillPeerSelector extends PeerSelector {
+ public FloodfillPeerSelector(RouterContext ctx) { super(ctx); }
+
+ /**
+ * Pick out peers with the floodfill capacity set, returning them first, but then
+ * after they're complete, sort via kademlia.
+ *
+ * @return List of Hash for the peers selected
+ */
+ public List selectNearestExplicitThin(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {
+ if (peersToIgnore == null)
+ peersToIgnore = new HashSet(1);
+ peersToIgnore.add(_context.router().getRouterInfo().getIdentity().getHash());
+ FloodfillSelectionCollector matches = new FloodfillSelectionCollector(key, peersToIgnore, maxNumRouters);
+ kbuckets.getAll(matches);
+ List rv = matches.get(maxNumRouters);
+ if (_log.shouldLog(Log.DEBUG))
+ _log.debug("Searching for " + maxNumRouters + " peers close to " + key + ": "
+ + rv + " (not including " + peersToIgnore + ") [allHashes.size = "
+ + matches.size() + "]");
+ return rv;
+ }
+
+ public List selectFloodfillParticipants(KBucketSet kbuckets) {
+ FloodfillSelectionCollector matches = new FloodfillSelectionCollector(null, null, 0);
+ kbuckets.getAll(matches);
+ return matches.getFloodfillParticipants();
+ }
+
+ private class FloodfillSelectionCollector implements SelectionCollector {
+ private TreeMap _sorted;
+ private List _floodfillMatches;
+ private Hash _key;
+ private Set _toIgnore;
+ private int _matches;
+ private int _wanted;
+ public FloodfillSelectionCollector(Hash key, Set toIgnore, int wanted) {
+ _key = key;
+ _sorted = new TreeMap();
+ _floodfillMatches = new ArrayList(1);
+ _toIgnore = toIgnore;
+ _matches = 0;
+ _wanted = wanted;
+ }
+ public List getFloodfillParticipants() { return _floodfillMatches; }
+ public void add(Hash entry) {
+ if (_context.profileOrganizer().isFailing(entry))
+ return;
+ if ( (_toIgnore != null) && (_toIgnore.contains(entry)) )
+ return;
+ if (entry.equals(_context.routerHash()))
+ return;
+ RouterInfo info = _context.netDb().lookupRouterInfoLocally(entry);
+ if (info == null)
+ return;
+
+ if (FloodfillNetworkDatabaseFacade.isFloodfill(info)) {
+ _floodfillMatches.add(entry);
+ } else {
+ if ( (_wanted > _matches) && (_key != null) ) {
+ BigInteger diff = getDistance(_key, entry);
+ _sorted.put(diff, entry);
+ }
+ }
+ _matches++;
+ }
+ /** get the first $howMany entries matching */
+ public List get(int howMany) {
+ Collections.shuffle(_floodfillMatches, _context.random());
+ List rv = new ArrayList(howMany);
+ for (int i = 0; i < howMany && i < _floodfillMatches.size(); i++) {
+ rv.add(_floodfillMatches.get(i));
+ }
+ for (int i = rv.size(); i < howMany; i++) {
+ if (_sorted.size() <= 0)
+ break;
+ rv.add(_sorted.remove(_sorted.firstKey()));
+ }
+ return rv;
+ }
+ public int size() { return _matches; }
+ }
+}
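
Selection contract, illustrated (hypothetical caller in the same package, since the
class is package-private; key, ctx, and a populated kbuckets assumed in scope):

    FloodfillPeerSelector sel = new FloodfillPeerSelector(ctx);
    // every non-failing, non-local floodfill router we know of, unshuffled:
    List floodfills = sel.selectFloodfillParticipants(kbuckets);
    // up to 8 hashes: floodfill peers first, in random order, then non-floodfill
    // peers ordered by XOR distance from key to fill out the remainder:
    List closest = sel.selectNearestExplicitThin(key, 8, null, kbuckets);
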
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java
new file mode 100644
index 000000000..28e6fb23c
--- /dev/null
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java
@@ -0,0 +1,61 @@
+package net.i2p.router.networkdb.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might. Use at your own risk.
+ *
+ */
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import net.i2p.data.DataStructure;
+import net.i2p.data.Hash;
+import net.i2p.data.LeaseSet;
+import net.i2p.data.RouterInfo;
+import net.i2p.data.TunnelId;
+import net.i2p.data.i2np.DatabaseStoreMessage;
+import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.router.Job;
+import net.i2p.router.JobImpl;
+import net.i2p.router.ReplyJob;
+import net.i2p.router.RouterContext;
+import net.i2p.router.TunnelInfo;
+import net.i2p.router.peermanager.PeerProfile;
+import net.i2p.stat.Rate;
+import net.i2p.stat.RateStat;
+import net.i2p.util.Log;
+
+class FloodfillStoreJob extends StoreJob {
+ private FloodfillNetworkDatabaseFacade _facade;
+ /**
+ * Create a new search for the routingKey specified
+ *
+ */
+ public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) {
+ this(context, facade, key, data, onSuccess, onFailure, timeoutMs, null);
+ }
+
+ /**
+ * @param toSkip set of peer hashes of people we don't want to send the data to (e.g. we
+ * already know they have it). This can be null.
+ */
+ public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set toSkip) {
+ super(context, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip);
+ _facade = facade;
+ }
+
+ protected int getParallelization() { return 1; }
+ protected int getRedundancy() { return 1; }
+
+ /**
+ * Send was totally successful
+ */
+ protected void succeed() {
+ super.succeed();
+ getContext().jobQueue().addJob(new FloodfillVerifyStoreJob(getContext(), _state.getTarget(), _facade));
+ }
+}
\ No newline at end of file
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java
new file mode 100644
index 000000000..a7fe3338d
--- /dev/null
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java
@@ -0,0 +1,146 @@
+package net.i2p.router.networkdb.kademlia;
+
+import java.util.Collections;
+import java.util.List;
+import net.i2p.data.*;
+import net.i2p.data.i2np.*;
+import net.i2p.router.*;
+import net.i2p.util.Log;
+
+/**
+ * send a netDb lookup to a random floodfill peer - if it is found, great,
+ * but if they reply back saying they dont know it, queue up a store of the
+ * key to a random floodfill peer again (via FloodfillStoreJob)
+ *
+ */
+public class FloodfillVerifyStoreJob extends JobImpl {
+ private Log _log;
+ private Hash _key;
+ private FloodfillNetworkDatabaseFacade _facade;
+ private long _expiration;
+ private long _sendTime;
+
+ private static final int VERIFY_TIMEOUT = 10*1000;
+
+ public FloodfillVerifyStoreJob(RouterContext ctx, Hash key, FloodfillNetworkDatabaseFacade facade) {
+ super(ctx);
+ _key = key;
+ _log = ctx.logManager().getLog(getClass());
+ _facade = facade;
+ // wait 10 seconds before trying to verify the store
+ getTiming().setStartAfter(ctx.clock().now() + VERIFY_TIMEOUT);
+ getContext().statManager().createRateStat("netDb.floodfillVerifyOK", "How long a floodfill verify takes when it succeeds", "NetworkDatabase", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
+ getContext().statManager().createRateStat("netDb.floodfillVerifyFail", "How long a floodfill verify takes when it fails", "NetworkDatabase", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
+ getContext().statManager().createRateStat("netDb.floodfillVerifyTimeout", "How long a floodfill verify takes when it times out", "NetworkDatabase", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
+ }
+ public String getName() { return "Verify netdb store"; }
+ public void runJob() {
+ Hash target = pickTarget();
+ if (target == null) return;
+
+ DatabaseLookupMessage lookup = buildLookup();
+ if (lookup == null) return;
+
+ TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
+ if (outTunnel == null) {
+ if (_log.shouldLog(Log.WARN))
+ _log.warn("No outbound tunnels to verify a store");
+ return;
+ }
+
+ _sendTime = getContext().clock().now();
+ _expiration = _sendTime + VERIFY_TIMEOUT;
+ getContext().messageRegistry().registerPending(new VerifyReplySelector(), new VerifyReplyJob(getContext()), new VerifyTimeoutJob(getContext()), VERIFY_TIMEOUT);
+ getContext().tunnelDispatcher().dispatchOutbound(lookup, outTunnel.getSendTunnelId(0), target);
+ }
+
+ private Hash pickTarget() {
+ FloodfillPeerSelector sel = (FloodfillPeerSelector)_facade.getPeerSelector();
+ List peers = sel.selectFloodfillParticipants(_facade.getKBuckets());
+ Collections.shuffle(peers, getContext().random());
+ if (peers.size() > 0)
+ return (Hash)peers.get(0);
+
+ if (_log.shouldLog(Log.WARN))
+ _log.warn("No peers to verify floodfill with");
+ return null;
+ }
+
+ private DatabaseLookupMessage buildLookup() {
+ TunnelInfo replyTunnelInfo = getContext().tunnelManager().selectInboundTunnel();
+ if (replyTunnelInfo == null) {
+ if (_log.shouldLog(Log.WARN))
+ _log.warn("No inbound tunnels to get a reply from!");
+ return null;
+ }
+ DatabaseLookupMessage m = new DatabaseLookupMessage(getContext(), true);
+ m.setMessageExpiration(getContext().clock().now() + VERIFY_TIMEOUT);
+ m.setReplyTunnel(replyTunnelInfo.getReceiveTunnelId(0));
+ m.setFrom(replyTunnelInfo.getPeer(0));
+ m.setSearchKey(_key);
+ return m;
+ }
+
+ private class VerifyReplySelector implements MessageSelector {
+ public boolean continueMatching() {
+ return false; // only want one match
+ }
+
+ public long getExpiration() { return _expiration; }
+ public boolean isMatch(I2NPMessage message) {
+ if (message instanceof DatabaseStoreMessage) {
+ DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
+ if (_key.equals(dsm.getKey()))
+ return true;
+ else
+ return false;
+ } else if (message instanceof DatabaseSearchReplyMessage) {
+ DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
+ if (_key.equals(dsrm.getSearchKey()))
+ return true;
+ else
+ return false;
+ }
+ return false;
+ }
+ }
+
+ private class VerifyReplyJob extends JobImpl implements ReplyJob {
+ private I2NPMessage _message;
+ public VerifyReplyJob(RouterContext ctx) {
+ super(ctx);
+ }
+ public String getName() { return "Handle floodfill verification reply"; }
+ public void runJob() {
+ if (_message instanceof DatabaseStoreMessage) {
+ // store ok, w00t!
+ getContext().statManager().addRateData("netDb.floodfillVerifyOK", getContext().clock().now() - _sendTime, 0);
+ } else {
+ // store failed, boo, hiss!
+ getContext().statManager().addRateData("netDb.floodfillVerifyFail", getContext().clock().now() - _sendTime, 0);
+ resend();
+ }
+ }
+ public void setMessage(I2NPMessage message) { _message = message; }
+ }
+
+ /** the netDb store failed to verify, so resend it to a random floodfill peer */
+ private void resend() {
+ DataStructure ds = null;
+ ds = _facade.lookupLeaseSetLocally(_key);
+ if (ds == null)
+ ds = _facade.lookupRouterInfoLocally(_key);
+ _facade.sendStore(_key, ds, null, null, VERIFY_TIMEOUT, null);
+ }
+
+ private class VerifyTimeoutJob extends JobImpl {
+ public VerifyTimeoutJob(RouterContext ctx) {
+ super(ctx);
+ }
+ public String getName() { return "Floodfill verification timeout"; }
+ public void runJob() {
+ getContext().statManager().addRateData("netDb.floodfillVerifyTimeout", getContext().clock().now() - _sendTime, 0);
+ resend();
+ }
+ }
+}
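
Taken together, the floodfill store path now looks like this (sketch; facade, key,
and ls assumed in scope, as in RepublishLeaseSetJob below):

    // callers no longer queue a StoreJob directly:
    facade.sendStore(key, ls, onSuccess, onFailure, timeoutMs, null);
    // -> FloodfillStoreJob, with parallelization and redundancy of 1
    // -> on success, FloodfillVerifyStoreJob fires VERIFY_TIMEOUT (10s) later,
    //    looks the key up at a random floodfill peer through tunnels, and calls
    //    resend() on a DatabaseSearchReplyMessage reply or on timeout
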
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseLookupMessageJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseLookupMessageJob.java
new file mode 100644
index 000000000..cd78c7fee
--- /dev/null
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseLookupMessageJob.java
@@ -0,0 +1,47 @@
+package net.i2p.router.networkdb.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might. Use at your own risk.
+ *
+ */
+
+import java.util.Iterator;
+import java.util.Set;
+
+import net.i2p.data.DataStructure;
+import net.i2p.data.Hash;
+import net.i2p.data.LeaseSet;
+import net.i2p.data.RouterIdentity;
+import net.i2p.data.RouterInfo;
+import net.i2p.data.TunnelId;
+import net.i2p.data.i2np.DatabaseLookupMessage;
+import net.i2p.data.i2np.DatabaseSearchReplyMessage;
+import net.i2p.data.i2np.DatabaseStoreMessage;
+import net.i2p.data.i2np.I2NPMessage;
+import net.i2p.data.i2np.TunnelGatewayMessage;
+import net.i2p.router.Job;
+import net.i2p.router.JobImpl;
+import net.i2p.router.RouterContext;
+import net.i2p.router.TunnelInfo;
+import net.i2p.router.message.SendMessageDirectJob;
+import net.i2p.router.networkdb.HandleDatabaseLookupMessageJob;
+import net.i2p.util.Log;
+
+/**
+ * Handle a lookup for a key received from a remote peer, answering queries for any
+ * locally stored leaseSet (not just those published to us) when we are a floodfill peer.
+ *
+ */
+public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLookupMessageJob {
+ public HandleFloodfillDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
+ super(ctx, receivedMessage, from, fromHash);
+ }
+
+ protected boolean answerAllQueries() {
+ if (!FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext())) return false;
+ return FloodfillNetworkDatabaseFacade.isFloodfill(getContext().router().getRouterInfo());
+ }
+}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java
new file mode 100644
index 000000000..22923c57f
--- /dev/null
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java
@@ -0,0 +1,131 @@
+package net.i2p.router.networkdb.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might. Use at your own risk.
+ *
+ */
+
+import java.util.*;
+import net.i2p.data.*;
+import net.i2p.data.i2np.*;
+import net.i2p.router.*;
+import net.i2p.util.Log;
+
+/**
+ * Receive DatabaseStoreMessage data and store it in the local net db
+ *
+ */
+public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
+ private Log _log;
+ private DatabaseStoreMessage _message;
+ private RouterIdentity _from;
+ private Hash _fromHash;
+ private FloodfillNetworkDatabaseFacade _facade;
+
+ private static final int ACK_TIMEOUT = 15*1000;
+ private static final int ACK_PRIORITY = 100;
+
+ public HandleFloodfillDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash, FloodfillNetworkDatabaseFacade facade) {
+ super(ctx);
+ _log = ctx.logManager().getLog(getClass());
+ ctx.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ ctx.statManager().createRateStat("netDb.storeLeaseSetHandled", "How many leaseSet store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ ctx.statManager().createRateStat("netDb.storeRouterInfoHandled", "How many routerInfo store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ _message = receivedMessage;
+ _from = from;
+ _fromHash = fromHash;
+ _facade = facade;
+ }
+
+ public void runJob() {
+ if (_log.shouldLog(Log.DEBUG))
+ _log.debug("Handling database store message");
+
+ String invalidMessage = null;
+ boolean wasNew = false;
+ if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
+ getContext().statManager().addRateData("netDb.storeLeaseSetHandled", 1, 0);
+
+ try {
+ LeaseSet ls = _message.getLeaseSet();
+ // mark it as something we received, so we'll answer queries
+ // for it. this flag does NOT get set on entries that we
+ // receive in response to our own lookups.
+ ls.setReceivedAsPublished(true);
+ LeaseSet match = getContext().netDb().store(_message.getKey(), _message.getLeaseSet());
+ if (match == null) {
+ wasNew = true;
+ } else {
+ wasNew = false;
+ match.setReceivedAsPublished(true);
+ }
+ } catch (IllegalArgumentException iae) {
+ invalidMessage = iae.getMessage();
+ }
+ } else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
+ getContext().statManager().addRateData("netDb.storeRouterInfoHandled", 1, 0);
+ if (_log.shouldLog(Log.INFO))
+ _log.info("Handling dbStore of router " + _message.getKey() + " with publishDate of "
+ + new Date(_message.getRouterInfo().getPublished()));
+ try {
+ Object match = getContext().netDb().store(_message.getKey(), _message.getRouterInfo());
+ wasNew = (null == match);
+ getContext().profileManager().heardAbout(_message.getKey());
+ } catch (IllegalArgumentException iae) {
+ invalidMessage = iae.getMessage();
+ }
+ } else {
+ if (_log.shouldLog(Log.ERROR))
+ _log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType()
+ + ": " + _message);
+ }
+
+ if (_message.getReplyToken() > 0)
+ sendAck();
+
+ if (_from != null)
+ _fromHash = _from.getHash();
+ if (_fromHash != null) {
+ if (invalidMessage == null) {
+ getContext().profileManager().dbStoreReceived(_fromHash, wasNew);
+ getContext().statManager().addRateData("netDb.storeHandled", 1, 0);
+ if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()) && (_message.getReplyToken() > 0) ) {
+ if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
+ _facade.flood(_message.getLeaseSet());
+ else
+ _facade.flood(_message.getRouterInfo());
+ }
+ } else {
+ if (_log.shouldLog(Log.WARN))
+ _log.warn("Peer " + _fromHash.toBase64() + " sent bad data: " + invalidMessage);
+ }
+ }
+ }
+
+ private void sendAck() {
+ DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
+ msg.setMessageId(_message.getReplyToken());
+ msg.setArrival(getContext().clock().now());
+ TunnelInfo outTunnel = selectOutboundTunnel();
+ if (outTunnel == null) {
+ if (_log.shouldLog(Log.WARN))
+ _log.warn("No outbound tunnel could be found");
+ return;
+ } else {
+ getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0), _message.getReplyTunnel(), _message.getReplyGateway());
+ }
+ }
+
+ private TunnelInfo selectOutboundTunnel() {
+ return getContext().tunnelManager().selectOutboundTunnel();
+ }
+
+ public String getName() { return "Handle Database Store Message"; }
+
+ public void dropped() {
+ getContext().messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
+ }
+}
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java
index 628fcfc41..dc4ddb9f2 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java
@@ -46,7 +46,7 @@ import net.i2p.util.Log;
*
*/
public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
- private Log _log;
+ protected Log _log;
private KBucketSet _kb; // peer hashes sorted into kbuckets, but within kbuckets, unsorted
private DataStore _ds; // hash to DataStructure mapping, persisted when necessary
/** where the data store is pushing the data */
@@ -62,8 +62,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
private HarvesterJob _harvestJob;
/** when was the last time an exploration found something new? */
private long _lastExploreNew;
- private PeerSelector _peerSelector;
- private RouterContext _context;
+ protected PeerSelector _peerSelector;
+ protected RouterContext _context;
/**
* Map of Hash to RepublishLeaseSetJob for leases we're already managing.
* This is added to when we create a new RepublishLeaseSetJob, and the values are
@@ -93,8 +93,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
}
-
-
/**
* for the 10 minutes after startup, don't fail db entries so that if we were
* offline for a while, we'll have a chance of finding some live peers with the
@@ -123,15 +121,18 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public KademliaNetworkDatabaseFacade(RouterContext context) {
_context = context;
- _log = _context.logManager().getLog(KademliaNetworkDatabaseFacade.class);
+ _log = _context.logManager().getLog(getClass());
_initialized = false;
- _peerSelector = new PeerSelector(_context);
+ _peerSelector = createPeerSelector();
_publishingLeaseSets = new HashMap(8);
_lastExploreNew = 0;
_activeRequests = new HashMap(8);
_enforceNetId = DEFAULT_ENFORCE_NETID;
}
+ protected PeerSelector createPeerSelector() { return new PeerSelector(_context); }
+ public PeerSelector getPeerSelector() { return _peerSelector; }
+
KBucketSet getKBuckets() { return _kb; }
DataStore getDataStore() { return _ds; }
@@ -266,8 +267,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_lastSent = new HashMap(1024);
_dbDir = dbDir;
- _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new DatabaseLookupMessageHandler(_context));
- _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseStoreMessage.MESSAGE_TYPE, new DatabaseStoreMessageHandler(_context));
+ createHandlers();
_initialized = true;
_started = System.currentTimeMillis();
@@ -307,6 +307,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
}
+ protected void createHandlers() {
+ _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new DatabaseLookupMessageHandler(_context));
+ _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseStoreMessage.MESSAGE_TYPE, new DatabaseStoreMessageHandler(_context));
+ }
+
/**
* Get the routers closest to that key in response to a remote lookup
*/
@@ -359,6 +364,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
public int getKnownRouters() {
+ if (_kb == null) return 0;
CountRouters count = new CountRouters();
_kb.getAll(count);
return count.size();
@@ -368,12 +374,31 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
private int _count;
public int size() { return _count; }
public void add(Hash entry) {
+ if (_ds == null) return;
Object o = _ds.get(entry);
if (o instanceof RouterInfo)
_count++;
}
}
+ public int getKnownLeaseSets() {
+ if (_kb == null) return 0;
+ CountLeaseSets count = new CountLeaseSets();
+ _kb.getAll(count);
+ return count.size();
+ }
+
+ private class CountLeaseSets implements SelectionCollector {
+ private int _count;
+ public int size() { return _count; }
+ public void add(Hash entry) {
+ if (_ds == null) return;
+ Object o = _ds.get(entry);
+ if (o instanceof LeaseSet)
+ _count++;
+ }
+ }
+
public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {
if (!_initialized) return;
LeaseSet ls = lookupLeaseSetLocally(key);
@@ -819,6 +844,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
responseTime = MAX_PER_PEER_TIMEOUT;
return 4 * (int)responseTime; // give it up to 4x the average response time
}
+
+ public void sendStore(Hash key, DataStructure ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
+ _context.jobQueue().addJob(new StoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
+ }
public void renderStatusHTML(Writer out) throws IOException {
StringBuffer buf = new StringBuffer(10*1024);
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java
index 49d1c7af3..f8273b7ff 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/PeerSelector.java
@@ -27,12 +27,12 @@ import net.i2p.stat.RateStat;
import net.i2p.util.Log;
class PeerSelector {
- private Log _log;
- private RouterContext _context;
+ protected Log _log;
+ protected RouterContext _context;
public PeerSelector(RouterContext ctx) {
_context = ctx;
- _log = _context.logManager().getLog(PeerSelector.class);
+ _log = _context.logManager().getLog(getClass());
}
/**
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java
index daba22e15..a13b30517 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java
@@ -22,7 +22,7 @@ import net.i2p.util.Log;
*/
public class RepublishLeaseSetJob extends JobImpl {
private Log _log;
- private final static long REPUBLISH_LEASESET_DELAY = 3*60*1000; // 3 mins
+ private final static long REPUBLISH_LEASESET_DELAY = 5*60*1000;
private final static long REPUBLISH_LEASESET_TIMEOUT = 60*1000;
private Hash _dest;
private KademliaNetworkDatabaseFacade _facade;
@@ -48,14 +48,17 @@ public class RepublishLeaseSetJob extends JobImpl {
_log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
} else {
getContext().statManager().addRateData("netDb.republishLeaseSetCount", 1, 0);
- getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT));
+ _facade.sendStore(_dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT, null);
+ //getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT));
}
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");
}
- long republishDelay = getContext().random().nextLong(2*REPUBLISH_LEASESET_DELAY);
- requeue(republishDelay);
+ if (false) { // floodfill doesn't require republishing
+ long republishDelay = getContext().random().nextLong(2*REPUBLISH_LEASESET_DELAY);
+ requeue(republishDelay);
+ }
return;
} else {
if (_log.shouldLog(Log.INFO))
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java
index 0282a5514..5d882799a 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/SearchJob.java
@@ -61,7 +61,7 @@ class SearchJob extends JobImpl {
* How long will we give each peer to reply to our search?
*
*/
- private static final int PER_PEER_TIMEOUT = 2*1000;
+ private static final int PER_PEER_TIMEOUT = 5*1000;
/**
* give ourselves 30 seconds to send out the value found to the closest
@@ -96,7 +96,7 @@ class SearchJob extends JobImpl {
_isLease = isLease;
_deferredSearches = new ArrayList(0);
_deferredCleared = false;
- _peerSelector = new PeerSelector(getContext());
+ _peerSelector = facade.getPeerSelector();
_startedOn = -1;
_expiration = getContext().clock().now() + timeoutMs;
getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
@@ -133,6 +133,8 @@ class SearchJob extends JobImpl {
*
*/
protected int getPerPeerTimeoutMs() {
+ if (true)
+ return PER_PEER_TIMEOUT;
int rv = -1;
RateStat rs = getContext().statManager().getRate("netDb.successTime");
if (rs != null)
@@ -576,6 +578,7 @@ class SearchJob extends JobImpl {
protected class FailedJob extends JobImpl {
private Hash _peer;
private boolean _penalizePeer;
+ private long _sentOn;
public FailedJob(RouterContext enclosingContext, RouterInfo peer) {
this(enclosingContext, peer, true);
}
@@ -588,12 +591,14 @@ class SearchJob extends JobImpl {
super(enclosingContext);
_penalizePeer = penalizePeer;
_peer = peer.getIdentity().getHash();
+ _sentOn = enclosingContext.clock().now();
}
public void runJob() {
+ if (_state.completed()) return;
_state.replyTimeout(_peer);
if (_penalizePeer) {
if (_log.shouldLog(Log.WARN))
- _log.warn("Penalizing peer for timeout on search: " + _peer.toBase64());
+ _log.warn("Penalizing peer for timeout on search: " + _peer.toBase64() + " after " + (getContext().clock().now() - _sentOn));
getContext().profileManager().dbLookupFailed(_peer);
} else {
if (_log.shouldLog(Log.ERROR))
@@ -657,9 +662,7 @@ class SearchJob extends JobImpl {
if (SHOULD_RESEND_ROUTERINFO) {
ds = _facade.lookupRouterInfoLocally(_state.getTarget());
if (ds != null)
- getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _state.getTarget(),
- ds, null, null, RESEND_TIMEOUT,
- _state.getSuccessful()));
+ _facade.sendStore(_state.getTarget(), ds, null, null, RESEND_TIMEOUT, _state.getSuccessful());
}
} else {
Set sendTo = _state.getRepliedPeers(); // _state.getFailed();
diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
index de1fba4e0..5401a8456 100644
--- a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
+++ b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java
@@ -32,7 +32,7 @@ import net.i2p.util.Log;
class StoreJob extends JobImpl {
private Log _log;
private KademliaNetworkDatabaseFacade _facade;
- private StoreState _state;
+ protected StoreState _state;
private Job _onSuccess;
private Job _onFailure;
private long _timeoutMs;
@@ -82,7 +82,7 @@ class StoreJob extends JobImpl {
_onFailure = onFailure;
_timeoutMs = timeoutMs;
_expiration = context.clock().now() + timeoutMs;
- _peerSelector = new PeerSelector(context);
+ _peerSelector = facade.getPeerSelector();
}
public String getName() { return "Kademlia NetDb Store";}
@@ -112,6 +112,9 @@ class StoreJob extends JobImpl {
continueSending();
}
}
+
+ protected int getParallelization() { return PARALLELIZATION; }
+ protected int getRedundancy() { return REDUNDANCY; }
/**
* Send a series of searches to the next available peers as selected by
@@ -121,15 +124,15 @@ class StoreJob extends JobImpl {
*/
private void continueSending() {
if (_state.completed()) return;
- int toCheck = PARALLELIZATION - _state.getPending().size();
+ int toCheck = getParallelization() - _state.getPending().size();
if (toCheck <= 0) {
// too many already pending
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Too many store messages pending");
return;
}
- if (toCheck > PARALLELIZATION)
- toCheck = PARALLELIZATION;
+ if (toCheck > getParallelization())
+ toCheck = getParallelization();
List closestHashes = getClosestRouters(_state.getTarget(), toCheck, _state.getAttempted());
if ( (closestHashes == null) || (closestHashes.size() <= 0) ) {
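
[Together with the widened visibility of _state, succeed(), and fail(), these
getParallelization()/getRedundancy() hooks make StoreJob subclassable. A
hypothetical subclass that widens the fan-out; the class name, constant values,
and constructor signature are assumptions based on the call site above, not part
of this patch:

    class FloodfillStoreJob extends StoreJob {
        // illustrative values: push to more peers concurrently, demand more acks
        private static final int FLOODFILL_PARALLELIZATION = 4;
        private static final int FLOODFILL_REDUNDANCY = 4;
        public FloodfillStoreJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade,
                                 Hash key, DataStructure data, Job onSuccess,
                                 Job onFailure, long timeoutMs, Set toSkip) {
            super(ctx, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip);
        }
        protected int getParallelization() { return FLOODFILL_PARALLELIZATION; }
        protected int getRedundancy() { return FLOODFILL_REDUNDANCY; }
    }
]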
@@ -310,7 +313,7 @@ class StoreJob extends JobImpl {
getContext().profileManager().dbStoreSent(_peer.getIdentity().getHash(), howLong);
getContext().statManager().addRateData("netDb.ackTime", howLong, howLong);
- if (_state.getCompleteCount() >= REDUNDANCY) {
+ if (_state.getCompleteCount() >= getRedundancy()) {
succeed();
} else {
sendNext();
@@ -352,7 +355,7 @@ class StoreJob extends JobImpl {
/**
* Send was totally successful
*/
- private void succeed() {
+ protected void succeed() {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Succeeded sending key " + _state.getTarget());
if (_log.shouldLog(Log.DEBUG))
@@ -367,7 +370,7 @@ class StoreJob extends JobImpl {
/**
* Send totally failed
*/
- private void fail() {
+ protected void fail() {
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": Failed sending key " + _state.getTarget());
if (_log.shouldLog(Log.DEBUG))
diff --git a/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java b/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java
index 1baf4fc1b..aa17423f8 100644
--- a/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java
+++ b/router/java/src/net/i2p/router/startup/CreateRouterInfoJob.java
@@ -25,6 +25,7 @@ import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.util.Log;
public class CreateRouterInfoJob extends JobImpl {
@@ -53,6 +54,8 @@ public class CreateRouterInfoJob extends JobImpl {
info.setAddresses(getContext().commSystem().createAddresses());
Properties stats = getContext().statPublisher().publishStatistics();
stats.setProperty(RouterInfo.PROP_NETWORK_ID, Router.NETWORK_ID+"");
+ if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()))
+ info.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
info.setOptions(stats);
info.setPeers(new HashSet());
info.setPublished(getCurrentPublishDate(getContext()));
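
[floodfillEnabled() itself is not part of this patch; presumably it consults a
router configuration flag, and the same check reappears in RebuildRouterInfoJob
below. A minimal sketch under that assumption (the property name is invented for
illustration):

    // Sketch only: lives in FloodfillNetworkDatabaseFacade, which this patch
    // does not show. "router.floodfillParticipant" is an assumed property name.
    public static boolean floodfillEnabled(RouterContext ctx) {
        return "true".equals(ctx.getProperty("router.floodfillParticipant", "false"));
    }
]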
diff --git a/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java b/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java
index 82942b387..16c328adc 100644
--- a/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java
+++ b/router/java/src/net/i2p/router/startup/RebuildRouterInfoJob.java
@@ -25,6 +25,7 @@ import net.i2p.data.SigningPublicKey;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.util.Log;
/**
@@ -126,6 +127,8 @@ public class RebuildRouterInfoJob extends JobImpl {
Properties stats = getContext().statPublisher().publishStatistics();
stats.setProperty(RouterInfo.PROP_NETWORK_ID, ""+Router.NETWORK_ID);
info.setOptions(stats);
+ if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()))
+ info.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
// info.setPeers(new HashSet()); // this would have the trusted peers
info.setPublished(CreateRouterInfoJob.getCurrentPublishDate(getContext()));
diff --git a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java
index d6527efd6..e724088f3 100644
--- a/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java
+++ b/router/java/src/net/i2p/router/tunnel/InboundMessageDistributor.java
@@ -4,6 +4,7 @@ import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.Payload;
import net.i2p.data.i2np.DataMessage;
+import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.GarlicMessage;
@@ -109,7 +110,17 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
_receiver.receive((GarlicMessage)data);
return;
} else {
- _context.inNetMessagePool().add(data, null, null);
+ if (data instanceof DatabaseStoreMessage) {
+ // treat db store explicitly, since we don't want to republish (or flood)
+ // unnecessarily
+ DatabaseStoreMessage dsm = (DatabaseStoreMessage)data;
+ if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
+ _context.netDb().store(dsm.getKey(), dsm.getLeaseSet());
+ else
+ _context.netDb().store(dsm.getKey(), dsm.getRouterInfo());
+ } else {
+ _context.inNetMessagePool().add(data, null, null);
+ }
return;
}
case DeliveryInstructions.DELIVERY_MODE_DESTINATION:
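
[Handling DatabaseStoreMessage inline keeps tunnel-delivered stores out of the
inNetMessagePool, whose handler would ack and potentially republish or flood the
entry. One caveat: if netDb().store(...) rejects invalid or expired entries with
an IllegalArgumentException, as the Kademlia facade historically has, a defensive
variant of the new branch would wrap the calls (the catch is an illustrative
hardening, not in this patch):

    DatabaseStoreMessage dsm = (DatabaseStoreMessage)data;
    try {
        if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
            _context.netDb().store(dsm.getKey(), dsm.getLeaseSet());
        else
            _context.netDb().store(dsm.getKey(), dsm.getRouterInfo());
    } catch (IllegalArgumentException iae) {
        // invalid or stale entry: drop it rather than poison the local netDb
    }
]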
diff --git a/router/java/src/net/i2p/router/tunnel/TunnelCreatorConfig.java b/router/java/src/net/i2p/router/tunnel/TunnelCreatorConfig.java
index cef9c93c1..39135a5e4 100644
--- a/router/java/src/net/i2p/router/tunnel/TunnelCreatorConfig.java
+++ b/router/java/src/net/i2p/router/tunnel/TunnelCreatorConfig.java
@@ -88,9 +88,12 @@ public class TunnelCreatorConfig implements TunnelInfo {
// H0:1235-->H1:2345-->H2:2345
StringBuffer buf = new StringBuffer(128);
if (_isInbound)
- buf.append("inbound: ");
+ buf.append("inbound");
else
- buf.append("outbound: ");
+ buf.append("outbound");
+ if (_destination == null)
+ buf.append(" exploratory");
+ buf.append(": ");
for (int i = 0; i < _peers.length; i++) {
buf.append(_peers[i].toBase64().substring(0,4));
buf.append(':');
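
[For illustration, an inbound tunnel with no destination (i.e. exploratory) now
renders with the extra marker; following the H0:1235-->H1:2345 pattern in the
comment above, with made-up peer hashes the output would look like:

    inbound exploratory: abcd:1235-->ef01:2345-->89ab:2345
]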
diff --git a/router/java/src/net/i2p/router/tunnel/pool/ExploratoryPeerSelector.java b/router/java/src/net/i2p/router/tunnel/pool/ExploratoryPeerSelector.java
index 63a40ce68..8bce77f03 100644
--- a/router/java/src/net/i2p/router/tunnel/pool/ExploratoryPeerSelector.java
+++ b/router/java/src/net/i2p/router/tunnel/pool/ExploratoryPeerSelector.java
@@ -6,6 +6,7 @@ import java.util.HashSet;
import java.util.List;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelPoolSettings;
+import net.i2p.util.Log;
/**
* Pick peers randomly out of the not-failing pool, and put them into randomly
@@ -14,15 +15,28 @@ import net.i2p.router.TunnelPoolSettings;
*/
class ExploratoryPeerSelector extends TunnelPeerSelector {
public List selectPeers(RouterContext ctx, TunnelPoolSettings settings) {
+ Log l = ctx.logManager().getLog(getClass());
int length = getLength(ctx, settings);
- if (length < 0)
+ if (length < 0) {
+ if (l.shouldLog(Log.DEBUG))
+ l.debug("Length requested is zero: " + settings);
return null;
+ }
- if (shouldSelectExplicit(settings))
- return selectExplicit(ctx, settings, length);
+ if (shouldSelectExplicit(settings)) {
+ List rv = selectExplicit(ctx, settings, length);
+ if (l.shouldLog(Log.DEBUG))
+ l.debug("Explicit peers selected: " + rv);
+ return rv;
+ }
+ HashSet exclude = new HashSet(1);
+ exclude.add(ctx.routerHash());
HashSet matches = new HashSet(length);
- ctx.profileOrganizer().selectNotFailingPeers(length, null, matches, true);
+ ctx.profileOrganizer().selectNotFailingPeers(length, exclude, matches, false);
+
+ if (l.shouldLog(Log.DEBUG))
+ l.debug("profileOrganizer.selectNotFailing(" + length + ") found " + matches);
matches.remove(ctx.routerHash());
ArrayList rv = new ArrayList(matches);
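
[Two details of the new selection are worth noting. Excluding the local router
hash up front lets the profile organizer return a full-length result, whereas
pruning it only afterwards could leave the hop list one peer short; the surviving
matches.remove(ctx.routerHash()) is now redundant but harmless. And the final
boolean to selectNotFailingPeers appears to toggle a stricter notion of "not
failing", so flipping it from true to false broadens the candidate pool for
exploratory tunnels.]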