2005-08-29 jrandom

* Added the new test Floodfill netDb
Author: jrandom
Date: 2005-08-30 01:59:11 +00:00
Committed by: zzz
Parent: e313da254c
Commit: 3ba921ec0e
32 changed files with 902 additions and 50 deletions
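For orientation, a brief sketch (not part of the commit) of how the new floodfill role introduced below is switched on. The property name, its default, and the 'f' capability come from FloodfillNetworkDatabaseFacade, Router, and PublishLocalRouterInfoJob in this diff; the surrounding variable names are illustrative only.

    // Illustrative sketch only -- router.floodfillParticipant defaults to "false" in this diff
    boolean floodfill = "true".equals(ctx.getProperty("router.floodfillParticipant", "false"));
    if (floodfill)
        routerInfo.addCapability('f'); // CAPACITY_FLOODFILL, advertised in the published RouterInfo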


@@ -103,6 +103,18 @@ public class HTMLPreviewRenderer extends HTMLRenderer {
             _postBodyBuffer.append("<br />\n");
         }
+        if (_archives.size() > 0) {
+            _postBodyBuffer.append("<b>Archives:</b>");
+            for (int i = 0; i < _archives.size(); i++) {
+                ArchiveRef a = (ArchiveRef)_archives.get(i);
+                _postBodyBuffer.append(" <a href=\"").append(getArchiveURL(null, new SafeURL(a.locationSchema + "://" + a.location)));
+                _postBodyBuffer.append("\">").append(sanitizeString(a.name)).append("</a>");
+                if (a.description != null)
+                    _postBodyBuffer.append(": ").append(sanitizeString(a.description));
+            }
+            _postBodyBuffer.append("<br />\n");
+        }
         _postBodyBuffer.append("</td>\n</form>\n</tr>\n");
         _postBodyBuffer.append("</table>\n");
     }


@@ -26,6 +26,7 @@ public class HTMLRenderer extends EventReceiverImpl {
     protected List _addresses;
     protected List _links;
     protected List _blogs;
+    protected List _archives;
     protected StringBuffer _preBodyBuffer;
     protected StringBuffer _bodyBuffer;
     protected StringBuffer _postBodyBuffer;
@@ -93,6 +94,7 @@ public class HTMLRenderer extends EventReceiverImpl {
         _addresses = new ArrayList();
         _links = new ArrayList();
         _blogs = new ArrayList();
+        _archives = new ArrayList();
         _cutBody = cutBody;
         _showImages = showImages;
         _cutReached = false;
@@ -261,9 +263,6 @@ public class HTMLRenderer extends EventReceiverImpl {
      *
      */
     public void receiveBlog(String name, String hash, String tag, long entryId, List locations, String description) {
-        if (!continueBody()) { return; }
-        if (hash == null) return;
         System.out.println("Receiving the blog: " + name + "/" + hash + "/" + tag + "/" + entryId +"/" + locations + ": "+ description);
         byte blogData[] = Base64.decode(hash);
         if ( (blogData == null) || (blogData.length != Hash.HASH_LENGTH) )
@@ -278,6 +277,9 @@ public class HTMLRenderer extends EventReceiverImpl {
         if (!_blogs.contains(b))
             _blogs.add(b);
+        if (!continueBody()) { return; }
+        if (hash == null) return;
         Hash blog = new Hash(blogData);
         if (entryId > 0) {
             String pageURL = getPageURL(blog, tag, entryId, -1, -1, true, (_user != null ? _user.getShowImages() : false));
@@ -319,6 +321,45 @@ public class HTMLRenderer extends EventReceiverImpl {
         _bodyBuffer.append("] ");
     }
+    protected static class ArchiveRef {
+        public String name;
+        public String description;
+        public String locationSchema;
+        public String location;
+        public int hashCode() { return -1; }
+        public boolean equals(Object o) {
+            ArchiveRef a = (ArchiveRef)o;
+            return DataHelper.eq(name, a.name) && DataHelper.eq(description, a.description)
+                   && DataHelper.eq(locationSchema, a.locationSchema)
+                   && DataHelper.eq(location, a.location);
+        }
+    }
+    public void receiveArchive(String name, String description, String locationSchema, String location,
+                               String postingKey, String anchorText) {
+        ArchiveRef a = new ArchiveRef();
+        a.name = name;
+        a.description = description;
+        a.locationSchema = locationSchema;
+        a.location = location;
+        if (!_archives.contains(a))
+            _archives.add(a);
+        if (!continueBody()) { return; }
+        _bodyBuffer.append(sanitizeString(anchorText)).append(" [Archive ");
+        if (name != null)
+            _bodyBuffer.append(sanitizeString(name));
+        if (location != null) {
+            _bodyBuffer.append(" at ");
+            SafeURL surl = new SafeURL(locationSchema + "://" + location);
+            _bodyBuffer.append("<a href=\"").append(getArchiveURL(null, surl));
+            _bodyBuffer.append("\">").append(sanitizeString(surl.toString())).append("</a>");
+        }
+        if (description != null)
+            _bodyBuffer.append(": ").append(sanitizeString(description));
+        _bodyBuffer.append("]");
+    }
     protected static class Link {
         public String schema;
         public String location;
@@ -414,6 +455,12 @@ public class HTMLRenderer extends EventReceiverImpl {
         else if (addrs > 1)
             _postBodyBuffer.append(addrs).append(" addresses ");
+        int archives = _archives.size();
+        if (archives == 1)
+            _postBodyBuffer.append("1 archive ");
+        else if (archives > 1)
+            _postBodyBuffer.append(archives).append(" archives ");
         if (_entry != null) {
             List replies = _archive.getIndex().getReplies(_entry.getURI());
             if ( (replies != null) && (replies.size() > 0) ) {
@@ -490,10 +537,10 @@ public class HTMLRenderer extends EventReceiverImpl {
         }
         if (_addresses.size() > 0) {
-            _postBodyBuffer.append("<b>Addresses:</b> ");
+            _postBodyBuffer.append("<b>Addresses:</b>");
             for (int i = 0; i < _addresses.size(); i++) {
                 Address a = (Address)_addresses.get(i);
-                _postBodyBuffer.append("<a href=\"addaddress.jsp?schema=");
+                _postBodyBuffer.append(" <a href=\"addaddress.jsp?schema=");
                 _postBodyBuffer.append(sanitizeURL(a.schema)).append("&location=");
                 _postBodyBuffer.append(sanitizeURL(a.location)).append("&name=");
                 _postBodyBuffer.append(sanitizeURL(a.name));
@@ -502,6 +549,17 @@ public class HTMLRenderer extends EventReceiverImpl {
             _postBodyBuffer.append("<br />\n");
         }
+        if (_archives.size() > 0) {
+            _postBodyBuffer.append("<b>Archives:</b>");
+            for (int i = 0; i < _archives.size(); i++) {
+                ArchiveRef a = (ArchiveRef)_archives.get(i);
+                _postBodyBuffer.append(" <a href=\"").append(getArchiveURL(null, new SafeURL(a.locationSchema + "://" + a.location)));
+                _postBodyBuffer.append("\">").append(sanitizeString(a.name)).append("</a>");
+                if (a.description != null)
+                    _postBodyBuffer.append(": ").append(sanitizeString(a.description));
+            }
+            _postBodyBuffer.append("<br />\n");
+        }
         if (_entry != null) {
             List replies = _archive.getIndex().getReplies(_entry.getURI());


@@ -196,6 +196,7 @@ public class SMLParser {
     private static final String T_HR = "hr";
     private static final String T_PRE = "pre";
     private static final String T_ATTACHMENT = "attachment";
+    private static final String T_ARCHIVE = "archive";
     private static final String P_ATTACHMENT = "attachment";
     private static final String P_WHO_QUOTED = "author";
@@ -211,6 +212,11 @@ public class SMLParser {
     private static final String P_ADDRESS_LOCATION = "location";
     private static final String P_ADDRESS_SCHEMA = "schema";
     private static final String P_ATTACHMENT_ID = "id";
+    private static final String P_ARCHIVE_NAME = "name";
+    private static final String P_ARCHIVE_DESCRIPTION = "description";
+    private static final String P_ARCHIVE_LOCATION_SCHEMA = "schema";
+    private static final String P_ARCHIVE_LOCATION = "location";
+    private static final String P_ARCHIVE_POSTING_KEY = "postingkey";
     private void parseTag(String tagName, Map attr, String body, EventReceiver receiver) {
         tagName = tagName.toLowerCase();
@@ -241,6 +247,10 @@ public class SMLParser {
             }
             receiver.receiveBlog(getString(P_BLOG_NAME, attr), getString(P_BLOG_HASH, attr), getString(P_BLOG_TAG, attr),
                                  getLong(P_BLOG_ENTRY, attr), locations, body);
+        } else if (T_ARCHIVE.equals(tagName)) {
+            receiver.receiveArchive(getString(P_ARCHIVE_NAME, attr), getString(P_ARCHIVE_DESCRIPTION, attr),
+                                    getString(P_ARCHIVE_LOCATION_SCHEMA, attr), getString(P_ARCHIVE_LOCATION, attr),
+                                    getString(P_ARCHIVE_POSTING_KEY, attr), body);
         } else if (T_LINK.equals(tagName)) {
             receiver.receiveLink(getString(P_LINK_SCHEMA, attr), getString(P_LINK_LOCATION, attr), body);
         } else if (T_ADDRESS.equals(tagName)) {


@@ -97,6 +97,7 @@ Post content (in raw SML, no headers):<br />
 * [blog name="name" bloghash="base64hash" blogtag="tag"]description[/blog] = link to all posts in the blog with the specified tag
 * [blog name="name" blogtag="tag"]description[/blog] = link to all posts in all blogs with the specified tag
 * [link schema="eep" location="http://forum.i2p"]text[/link] = offer a link to an external resource (accessible with the given schema)
+* [archive name="name" description="they have good stuff" schema="eep" location="http://syndiemedia.i2p/archive/archive.txt"]foo![/archive] = offer an easy way to sync up with a new Syndie archive
 SML headers are newline delimited key=value pairs. Example keys are:
 * bgcolor = background color of the post (e.g. bgcolor=#ffccaa or bgcolor=red)
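For illustration only (this example is not part of the commit; it just reuses the attributes from the new syntax line above): a post body containing

    [archive name="name" description="they have good stuff" schema="eep" location="http://syndiemedia.i2p/archive/archive.txt"]foo![/archive]

is parsed by SMLParser into a receiveArchive event. HTMLRenderer then emits the anchor text ("foo!") followed by an "[Archive name at location: description]" reference whose location is linked via getArchiveURL, lists the archive again under "Archives:" in the post footer, and counts it in the "N archives" summary line.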


@@ -316,6 +316,17 @@ public class RouterInfo extends DataStructureImpl {
         return "";
     }
+    public void addCapability(char cap) {
+        if (_options == null) _options = new OrderedProperties();
+        synchronized (_options) {
+            String caps = _options.getProperty(PROP_CAPABILITIES);
+            if (caps == null)
+                _options.setProperty(PROP_CAPABILITIES, ""+cap);
+            else if (caps.indexOf(cap) == -1)
+                _options.setProperty(PROP_CAPABILITIES, caps + cap);
+        }
+    }
     /**
      * Get the routing key for the structure using the current modifier in the RoutingKeyGenerator.
      * This only calculates a new one when necessary though (if the generator's key modifier changes)


@@ -1,4 +1,7 @@
-$Id: history.txt,v 1.230 2005/08/24 17:55:27 jrandom Exp $
+$Id: history.txt,v 1.231 2005/08/27 17:15:38 jrandom Exp $
 
+2005-08-29 jrandom
+    * Added the new test Floodfill netDb
+
 2005-08-27 jrandom
     * Minor logging and optimization tweaks in the router and SDK


@@ -59,6 +59,7 @@ public abstract class NetworkDatabaseFacade implements Service {
     public abstract void fail(Hash dbEntry);
     public int getKnownRouters() { return 0; }
+    public int getKnownLeaseSets() { return 0; }
 }


@@ -35,6 +35,7 @@ import net.i2p.data.i2np.GarlicMessage;
 //import net.i2p.data.i2np.TunnelMessage;
 import net.i2p.router.message.GarlicMessageHandler;
 //import net.i2p.router.message.TunnelMessageHandler;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 import net.i2p.router.startup.StartupJob;
 import net.i2p.stat.Rate;
 import net.i2p.stat.RateStat;
@@ -291,6 +292,8 @@ public class Router {
         stats.setProperty(RouterInfo.PROP_NETWORK_ID, NETWORK_ID+"");
         ri.setOptions(stats);
         ri.setAddresses(_context.commSystem().createAddresses());
+        if (FloodfillNetworkDatabaseFacade.floodfillEnabled(_context))
+            ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
         SigningPrivateKey key = _context.keyManager().getSigningPrivateKey();
         if (key == null) {
             _log.log(Log.CRIT, "Internal error - signing private key not known? wtf");


@@ -9,6 +9,7 @@ import net.i2p.data.Hash;
 import net.i2p.router.admin.AdminManager;
 import net.i2p.router.client.ClientManagerFacadeImpl;
 import net.i2p.router.networkdb.kademlia.KademliaNetworkDatabaseFacade;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 import net.i2p.router.peermanager.Calculator;
 import net.i2p.router.peermanager.CapacityCalculator;
 import net.i2p.router.peermanager.IntegrationCalculator;
@@ -97,7 +98,7 @@ public class RouterContext extends I2PAppContext {
         _messageHistory = new MessageHistory(this);
         _messageRegistry = new OutboundMessageRegistry(this);
         _messageStateMonitor = new MessageStateMonitor(this);
-        _netDb = new KademliaNetworkDatabaseFacade(this);
+        _netDb = new FloodfillNetworkDatabaseFacade(this); // new KademliaNetworkDatabaseFacade(this);
         _keyManager = new KeyManager(this);
         if ("false".equals(getProperty("i2p.vmCommSystem", "false")))
             _commSystem = new CommSystemFacadeImpl(this);


@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
  *
  */
 public class RouterVersion {
-    public final static String ID = "$Revision: 1.219 $ $Date: 2005/08/24 17:55:26 $";
+    public final static String ID = "$Revision: 1.220 $ $Date: 2005/08/27 17:15:38 $";
     public final static String VERSION = "0.6.0.3";
-    public final static long BUILD = 3;
+    public final static long BUILD = 4;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION);
         System.out.println("Router ID: " + RouterVersion.ID);


@@ -19,6 +19,7 @@ import net.i2p.data.DataHelper;
 import net.i2p.stat.Rate;
 import net.i2p.stat.RateStat;
 import net.i2p.util.Log;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 /**
  * Maintain the statistics about the router
@@ -147,6 +148,12 @@ public class StatisticsManager implements Service {
             //includeRate("stream.con.receiveDuplicateSize", stats, new long[] { 60*60*1000 });
             stats.setProperty("stat_uptime", DataHelper.formatDuration(_context.router().getUptime()));
             stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");
+            if (FloodfillNetworkDatabaseFacade.isFloodfill(_context.router().getRouterInfo())) {
+                stats.setProperty("netdb.knownRouters", ""+_context.netDb().getKnownRouters());
+                stats.setProperty("netdb.knownLeaseSets", ""+_context.netDb().getKnownLeaseSets());
+            }
             _log.debug("Publishing peer rankings");
         } else {
             _log.debug("Not publishing peer rankings");


@@ -59,6 +59,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
         _fromHash = fromHash;
     }
+    protected boolean answerAllQueries() { return false; }
     public void runJob() {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Handling database lookup message for " + _message.getSearchKey());
@@ -75,7 +77,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
         if (ls != null) {
             // only answer a request for a LeaseSet if it has been published
             // to us, or, if its local, if we would have published to ourselves
-            if (ls.getReceivedAsPublished()) {
+            if (answerAllQueries() || ls.getReceivedAsPublished()) {
                 getContext().statManager().addRateData("netDb.lookupsMatchedReceivedPublished", 1, 0);
                 sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
             } else {


@@ -17,6 +17,7 @@ import net.i2p.data.SigningPrivateKey;
 import net.i2p.router.JobImpl;
 import net.i2p.router.RouterContext;
 import net.i2p.router.Router;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 import net.i2p.util.Log;
 /**
@@ -44,6 +45,8 @@ public class PublishLocalRouterInfoJob extends JobImpl {
         ri.setPublished(getContext().clock().now());
         ri.setOptions(stats);
         ri.setAddresses(getContext().commSystem().createAddresses());
+        if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()))
+            ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
         SigningPrivateKey key = getContext().keyManager().getSigningPrivateKey();
         if (key == null) {
             _log.log(Log.CRIT, "Internal error - signing private key not known? rescheduling publish for 30s");


@@ -55,8 +55,9 @@ class DataPublisherJob extends JobImpl {
                               new Exception("Publish expired lease?"));
                 }
             }
-            StoreJob store = new StoreJob(getContext(), _facade, key, data, null, null, STORE_TIMEOUT);
-            getContext().jobQueue().addJob(store);
+            _facade.sendStore(key, data, null, null, STORE_TIMEOUT, null);
+            //StoreJob store = new StoreJob(getContext(), _facade, key, data, null, null, STORE_TIMEOUT);
+            //getContext().jobQueue().addJob(store);
         }
         requeue(RERUN_DELAY_MS);
     }


@@ -0,0 +1,46 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
/**
* Build a HandleDatabaseLookupMessageJob whenever a DatabaseLookupMessage arrives
*
*/
public class FloodfillDatabaseLookupMessageHandler implements HandlerJobBuilder {
private RouterContext _context;
private Log _log;
public FloodfillDatabaseLookupMessageHandler(RouterContext context) {
_context = context;
_log = context.logManager().getLog(FloodfillDatabaseLookupMessageHandler.class);
_context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
}
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
_context.statManager().addRateData("netDb.lookupsReceived", 1, 0);
if (true || _context.throttle().acceptNetDbLookupRequest(((DatabaseLookupMessage)receivedMessage).getSearchKey())) {
return new HandleFloodfillDatabaseLookupMessageJob(_context, (DatabaseLookupMessage)receivedMessage, from, fromHash);
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Dropping lookup request as throttled");
_context.statManager().addRateData("netDb.lookupsDropped", 1, 1);
return null;
}
}
}


@@ -0,0 +1,34 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import net.i2p.data.Hash;
import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;
/**
* Create a HandleDatabaseStoreMessageJob whenever a DatabaseStoreMessage arrives
*
*/
public class FloodfillDatabaseStoreMessageHandler implements HandlerJobBuilder {
private RouterContext _context;
private FloodfillNetworkDatabaseFacade _facade;
public FloodfillDatabaseStoreMessageHandler(RouterContext context, FloodfillNetworkDatabaseFacade facade) {
_context = context;
_facade = facade;
}
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
return new HandleFloodfillDatabaseStoreMessageJob(_context, (DatabaseStoreMessage)receivedMessage, from, fromHash, _facade);
}
}


@@ -0,0 +1,97 @@
package net.i2p.router.networkdb.kademlia;
import java.util.*;
import net.i2p.router.*;
import net.i2p.router.networkdb.DatabaseStoreMessageHandler;
import net.i2p.data.i2np.*;
import net.i2p.data.*;
import net.i2p.util.Log;
/**
*
*/
public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
public static final char CAPACITY_FLOODFILL = 'f';
private static final String PROP_FLOODFILL_PARTICIPANT = "router.floodfillParticipant";
private static final String DEFAULT_FLOODFILL_PARTICIPANT = "false";
public FloodfillNetworkDatabaseFacade(RouterContext context) {
super(context);
}
protected void createHandlers() {
_context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new FloodfillDatabaseLookupMessageHandler(_context));
_context.inNetMessagePool().registerHandlerJobBuilder(DatabaseStoreMessage.MESSAGE_TYPE, new FloodfillDatabaseStoreMessageHandler(_context, this));
}
public void sendStore(Hash key, DataStructure ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
// if we are a part of the floodfill netDb, don't send out our own leaseSets as part
// of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
// perhaps statistically adjust this so we are the source every 1/N times... or something.
if (floodfillEnabled() && (ds instanceof RouterInfo)) {
flood(ds);
if (onSuccess != null)
_context.jobQueue().addJob(onSuccess);
} else {
_context.jobQueue().addJob(new FloodfillStoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
}
}
public void flood(DataStructure ds) {
FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector();
List peers = sel.selectFloodfillParticipants(getKBuckets());
int flooded = 0;
for (int i = 0; i < peers.size(); i++) {
Hash peer = (Hash)peers.get(i);
RouterInfo target = lookupRouterInfoLocally(peer);
if ( (target == null) || (_context.shitlist().isShitlisted(peer)) )
continue;
if (peer.equals(_context.routerHash()))
continue;
DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
if (ds instanceof LeaseSet) {
msg.setKey(((LeaseSet)ds).getDestination().calculateHash());
msg.setLeaseSet((LeaseSet)ds);
} else {
msg.setKey(((RouterInfo)ds).getIdentity().calculateHash());
msg.setRouterInfo((RouterInfo)ds);
}
msg.setReplyGateway(null);
msg.setReplyToken(0);
msg.setReplyTunnel(null);
OutNetMessage m = new OutNetMessage(_context);
m.setMessage(msg);
m.setOnFailedReplyJob(null);
m.setPriority(FLOOD_PRIORITY);
m.setTarget(target);
m.setExpiration(_context.clock().now()+FLOOD_TIMEOUT);
_context.commSystem().processMessage(m);
flooded++;
if (_log.shouldLog(Log.INFO))
_log.info("Flooding the entry for " + msg.getKey().toBase64() + " to " + peer.toBase64());
}
if (_log.shouldLog(Log.INFO))
_log.info("Flooded the to " + flooded + " peers");
}
private static final int FLOOD_PRIORITY = 200;
private static final int FLOOD_TIMEOUT = 30*1000;
protected PeerSelector createPeerSelector() { return new FloodfillPeerSelector(_context); }
public boolean floodfillEnabled() { return floodfillEnabled(_context); }
public static boolean floodfillEnabled(RouterContext ctx) {
String enabled = ctx.getProperty(PROP_FLOODFILL_PARTICIPANT, DEFAULT_FLOODFILL_PARTICIPANT);
return "true".equals(enabled);
}
public static boolean isFloodfill(RouterInfo peer) {
if (peer == null) return false;
String caps = peer.getCapabilities();
if ( (caps != null) && (caps.indexOf(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL) != -1) )
return true;
else
return false;
}
}
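To make the new flow concrete, a hedged sketch (assumed from the changes in this diff, not an official API summary) of how callers now publish through the facade instead of queueing a StoreJob themselves; ctx, key, and leaseSet are placeholders:

    // Sketch only -- see the DataPublisherJob and RepublishLeaseSetJob changes below for real call sites
    KademliaNetworkDatabaseFacade netDb = (KademliaNetworkDatabaseFacade) ctx.netDb();
    netDb.sendStore(key, leaseSet, null, null, 60*1000, null);
    // If this router is floodfill and the data is a RouterInfo, the entry is flooded directly to the
    // other floodfill peers; otherwise a FloodfillStoreJob (redundancy 1) is queued, and on success a
    // FloodfillVerifyStoreJob re-checks the store against a random floodfill peer about 10 seconds later.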


@@ -0,0 +1,105 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.math.BigInteger;
import java.util.*;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.router.RouterContext;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;
class FloodfillPeerSelector extends PeerSelector {
public FloodfillPeerSelector(RouterContext ctx) { super(ctx); }
/**
* Pick out peers with the floodfill capacity set, returning them first, but then
* after they're complete, sort via kademlia.
*
* @return List of Hash for the peers selected
*/
public List selectNearestExplicitThin(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {
if (peersToIgnore == null)
peersToIgnore = new HashSet(1);
peersToIgnore.add(_context.router().getRouterInfo().getIdentity().getHash());
FloodfillSelectionCollector matches = new FloodfillSelectionCollector(key, peersToIgnore, maxNumRouters);
kbuckets.getAll(matches);
List rv = matches.get(maxNumRouters);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Searching for " + maxNumRouters + " peers close to " + key + ": "
+ rv + " (not including " + peersToIgnore + ") [allHashes.size = "
+ matches.size() + "]");
return rv;
}
public List selectFloodfillParticipants(KBucketSet kbuckets) {
FloodfillSelectionCollector matches = new FloodfillSelectionCollector(null, null, 0);
kbuckets.getAll(matches);
return matches.getFloodfillParticipants();
}
private class FloodfillSelectionCollector implements SelectionCollector {
private TreeMap _sorted;
private List _floodfillMatches;
private Hash _key;
private Set _toIgnore;
private int _matches;
private int _wanted;
public FloodfillSelectionCollector(Hash key, Set toIgnore, int wanted) {
_key = key;
_sorted = new TreeMap();
_floodfillMatches = new ArrayList(1);
_toIgnore = toIgnore;
_matches = 0;
_wanted = wanted;
}
public List getFloodfillParticipants() { return _floodfillMatches; }
public void add(Hash entry) {
if (_context.profileOrganizer().isFailing(entry))
return;
if ( (_toIgnore != null) && (_toIgnore.contains(entry)) )
return;
if (entry.equals(_context.routerHash()))
return;
RouterInfo info = _context.netDb().lookupRouterInfoLocally(entry);
if (info == null)
return;
if (FloodfillNetworkDatabaseFacade.isFloodfill(info)) {
_floodfillMatches.add(entry);
} else {
if ( (_wanted > _matches) && (_key != null) ) {
BigInteger diff = getDistance(_key, entry);
_sorted.put(diff, entry);
}
}
_matches++;
}
/** get the first $howMany entries matching */
public List get(int howMany) {
Collections.shuffle(_floodfillMatches, _context.random());
List rv = new ArrayList(howMany);
for (int i = 0; i < howMany && i < _floodfillMatches.size(); i++) {
rv.add(_floodfillMatches.get(i));
}
for (int i = rv.size(); i < howMany; i++) {
if (_sorted.size() <= 0)
break;
rv.add(_sorted.remove(_sorted.firstKey()));
}
return rv;
}
public int size() { return _matches; }
}
}


@@ -0,0 +1,61 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.ReplyJob;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;
class FloodfillStoreJob extends StoreJob {
private FloodfillNetworkDatabaseFacade _facade;
/**
* Create a new search for the routingKey specified
*
*/
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) {
this(context, facade, key, data, onSuccess, onFailure, timeoutMs, null);
}
/**
* @param toSkip set of peer hashes of people we dont want to send the data to (e.g. we
* already know they have it). This can be null.
*/
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set toSkip) {
super(context, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip);
_facade = facade;
}
protected int getParallelization() { return 1; }
protected int getRedundancy() { return 1; }
/**
* Send was totally successful
*/
protected void succeed() {
super.succeed();
getContext().jobQueue().addJob(new FloodfillVerifyStoreJob(getContext(), _state.getTarget(), _facade));
}
}


@@ -0,0 +1,146 @@
package net.i2p.router.networkdb.kademlia;
import java.util.Collections;
import java.util.List;
import net.i2p.data.*;
import net.i2p.data.i2np.*;
import net.i2p.router.*;
import net.i2p.util.Log;
/**
* send a netDb lookup to a random floodfill peer - if it is found, great,
* but if they reply back saying they dont know it, queue up a store of the
* key to a random floodfill peer again (via FloodfillStoreJob)
*
*/
public class FloodfillVerifyStoreJob extends JobImpl {
private Log _log;
private Hash _key;
private FloodfillNetworkDatabaseFacade _facade;
private long _expiration;
private long _sendTime;
private static final int VERIFY_TIMEOUT = 10*1000;
public FloodfillVerifyStoreJob(RouterContext ctx, Hash key, FloodfillNetworkDatabaseFacade facade) {
super(ctx);
_key = key;
_log = ctx.logManager().getLog(getClass());
_facade = facade;
// wait 10 seconds before trying to verify the store
getTiming().setStartAfter(ctx.clock().now() + VERIFY_TIMEOUT);
getContext().statManager().createRateStat("netDb.floodfillVerifyOK", "How long a floodfill verify takes when it succeeds", "NetworkDatabase", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
getContext().statManager().createRateStat("netDb.floodfillVerifyFail", "How long a floodfill verify takes when it fails", "NetworkDatabase", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
getContext().statManager().createRateStat("netDb.floodfillVerifyTimeout", "How long a floodfill verify takes when it times out", "NetworkDatabase", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
}
public String getName() { return "Verify netdb store"; }
public void runJob() {
Hash target = pickTarget();
if (target == null) return;
DatabaseLookupMessage lookup = buildLookup();
if (lookup == null) return;
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if (outTunnel == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No outbound tunnels to verify a store");
return;
}
_sendTime = getContext().clock().now();
_expiration = _sendTime + VERIFY_TIMEOUT;
getContext().messageRegistry().registerPending(new VerifyReplySelector(), new VerifyReplyJob(getContext()), new VerifyTimeoutJob(getContext()), VERIFY_TIMEOUT);
getContext().tunnelDispatcher().dispatchOutbound(lookup, outTunnel.getSendTunnelId(0), target);
}
private Hash pickTarget() {
FloodfillPeerSelector sel = (FloodfillPeerSelector)_facade.getPeerSelector();
List peers = sel.selectFloodfillParticipants(_facade.getKBuckets());
Collections.shuffle(peers, getContext().random());
if (peers.size() > 0)
return (Hash)peers.get(0);
if (_log.shouldLog(Log.WARN))
_log.warn("No peers to verify floodfill with");
return null;
}
private DatabaseLookupMessage buildLookup() {
TunnelInfo replyTunnelInfo = getContext().tunnelManager().selectInboundTunnel();
if (replyTunnelInfo == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No inbound tunnels to get a reply from!");
return null;
}
DatabaseLookupMessage m = new DatabaseLookupMessage(getContext(), true);
m.setMessageExpiration(getContext().clock().now() + VERIFY_TIMEOUT);
m.setReplyTunnel(replyTunnelInfo.getReceiveTunnelId(0));
m.setFrom(replyTunnelInfo.getPeer(0));
m.setSearchKey(_key);
return m;
}
private class VerifyReplySelector implements MessageSelector {
public boolean continueMatching() {
return false; // only want one match
}
public long getExpiration() { return _expiration; }
public boolean isMatch(I2NPMessage message) {
if (message instanceof DatabaseStoreMessage) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)message;
if (_key.equals(dsm.getKey()))
return true;
else
return false;
} else if (message instanceof DatabaseSearchReplyMessage) {
DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage)message;
if (_key.equals(dsrm.getSearchKey()))
return true;
else
return false;
}
return false;
}
}
private class VerifyReplyJob extends JobImpl implements ReplyJob {
private I2NPMessage _message;
public VerifyReplyJob(RouterContext ctx) {
super(ctx);
}
public String getName() { return "Handle floodfill verification reply"; }
public void runJob() {
if (_message instanceof DatabaseStoreMessage) {
// store ok, w00t!
getContext().statManager().addRateData("netDb.floodfillVerifyOK", getContext().clock().now() - _sendTime, 0);
} else {
// store failed, boo, hiss!
getContext().statManager().addRateData("netDb.floodfillVerifyFail", getContext().clock().now() - _sendTime, 0);
resend();
}
}
public void setMessage(I2NPMessage message) { _message = message; }
}
/** the netDb store failed to verify, so resend it to a random floodfill peer */
private void resend() {
DataStructure ds = null;
ds = _facade.lookupLeaseSetLocally(_key);
if (ds == null)
ds = _facade.lookupRouterInfoLocally(_key);
_facade.sendStore(_key, ds, null, null, VERIFY_TIMEOUT, null);
}
private class VerifyTimeoutJob extends JobImpl {
public VerifyTimeoutJob(RouterContext ctx) {
super(ctx);
}
public String getName() { return "Floodfill verification timeout"; }
public void runJob() {
getContext().statManager().addRateData("netDb.floodfillVerifyTimeout", getContext().clock().now() - _sendTime, 0);
resend();
}
}
}


@@ -0,0 +1,47 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.util.Iterator;
import java.util.Set;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterIdentity;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.message.SendMessageDirectJob;
import net.i2p.router.networkdb.HandleDatabaseLookupMessageJob;
import net.i2p.util.Log;
/**
* Handle a lookup for a key received from a remote peer. Needs to be implemented
* to send back replies, etc
*
*/
public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLookupMessageJob {
public HandleFloodfillDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
super(ctx, receivedMessage, from, fromHash);
}
protected boolean answerAllQueries() {
if (!FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext())) return false;
return FloodfillNetworkDatabaseFacade.isFloodfill(getContext().router().getRouterInfo());
}
}


@@ -0,0 +1,131 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.util.*;
import net.i2p.data.*;
import net.i2p.data.i2np.*;
import net.i2p.router.*;
import net.i2p.util.Log;
/**
* Receive DatabaseStoreMessage data and store it in the local net db
*
*/
public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
private Log _log;
private DatabaseStoreMessage _message;
private RouterIdentity _from;
private Hash _fromHash;
private FloodfillNetworkDatabaseFacade _facade;
private static final int ACK_TIMEOUT = 15*1000;
private static final int ACK_PRIORITY = 100;
public HandleFloodfillDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash, FloodfillNetworkDatabaseFacade facade) {
super(ctx);
_log = ctx.logManager().getLog(getClass());
ctx.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("netDb.storeLeaseSetHandled", "How many leaseSet store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("netDb.storeRouterInfoHandled", "How many routerInfo store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_message = receivedMessage;
_from = from;
_fromHash = fromHash;
_facade = facade;
}
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Handling database store message");
String invalidMessage = null;
boolean wasNew = false;
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
getContext().statManager().addRateData("netDb.storeLeaseSetHandled", 1, 0);
try {
LeaseSet ls = _message.getLeaseSet();
// mark it as something we received, so we'll answer queries
// for it. this flag does NOT get set on entries that we
// receive in response to our own lookups.
ls.setReceivedAsPublished(true);
LeaseSet match = getContext().netDb().store(_message.getKey(), _message.getLeaseSet());
if (match == null) {
wasNew = true;
} else {
wasNew = false;
match.setReceivedAsPublished(true);
}
} catch (IllegalArgumentException iae) {
invalidMessage = iae.getMessage();
}
} else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
getContext().statManager().addRateData("netDb.storeRouterInfoHandled", 1, 0);
if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of router " + _message.getKey() + " with publishDate of "
+ new Date(_message.getRouterInfo().getPublished()));
try {
Object match = getContext().netDb().store(_message.getKey(), _message.getRouterInfo());
wasNew = (null == match);
getContext().profileManager().heardAbout(_message.getKey());
} catch (IllegalArgumentException iae) {
invalidMessage = iae.getMessage();
}
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType()
+ ": " + _message);
}
if (_message.getReplyToken() > 0)
sendAck();
if (_from != null)
_fromHash = _from.getHash();
if (_fromHash != null) {
if (invalidMessage == null) {
getContext().profileManager().dbStoreReceived(_fromHash, wasNew);
getContext().statManager().addRateData("netDb.storeHandled", 1, 0);
if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()) && (_message.getReplyToken() > 0) ) {
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
_facade.flood(_message.getLeaseSet());
else
_facade.flood(_message.getRouterInfo());
}
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Peer " + _fromHash.toBase64() + " sent bad data: " + invalidMessage);
}
}
}
private void sendAck() {
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
msg.setMessageId(_message.getReplyToken());
msg.setArrival(getContext().clock().now());
TunnelInfo outTunnel = selectOutboundTunnel();
if (outTunnel == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No outbound tunnel could be found");
return;
} else {
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0), _message.getReplyTunnel(), _message.getReplyGateway());
}
}
private TunnelInfo selectOutboundTunnel() {
return getContext().tunnelManager().selectOutboundTunnel();
}
public String getName() { return "Handle Database Store Message"; }
public void dropped() {
getContext().messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
}
}


@@ -46,7 +46,7 @@ import net.i2p.util.Log;
  *
  */
 public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
-    private Log _log;
+    protected Log _log;
     private KBucketSet _kb; // peer hashes sorted into kbuckets, but within kbuckets, unsorted
     private DataStore _ds; // hash to DataStructure mapping, persisted when necessary
     /** where the data store is pushing the data */
@@ -62,8 +62,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     private HarvesterJob _harvestJob;
     /** when was the last time an exploration found something new? */
     private long _lastExploreNew;
-    private PeerSelector _peerSelector;
-    private RouterContext _context;
+    protected PeerSelector _peerSelector;
+    protected RouterContext _context;
     /**
      * Map of Hash to RepublishLeaseSetJob for leases we're already managing.
      * This is added to when we create a new RepublishLeaseSetJob, and the values are
@@ -93,8 +93,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         }
     }
     /**
      * for the 10 minutes after startup, don't fail db entries so that if we were
      * offline for a while, we'll have a chance of finding some live peers with the
@@ -123,15 +121,18 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     public KademliaNetworkDatabaseFacade(RouterContext context) {
         _context = context;
-        _log = _context.logManager().getLog(KademliaNetworkDatabaseFacade.class);
+        _log = _context.logManager().getLog(getClass());
         _initialized = false;
-        _peerSelector = new PeerSelector(_context);
+        _peerSelector = createPeerSelector();
         _publishingLeaseSets = new HashMap(8);
         _lastExploreNew = 0;
         _activeRequests = new HashMap(8);
         _enforceNetId = DEFAULT_ENFORCE_NETID;
     }
+    protected PeerSelector createPeerSelector() { return new PeerSelector(_context); }
+    public PeerSelector getPeerSelector() { return _peerSelector; }
     KBucketSet getKBuckets() { return _kb; }
     DataStore getDataStore() { return _ds; }
@@ -266,8 +267,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         _lastSent = new HashMap(1024);
         _dbDir = dbDir;
-        _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new DatabaseLookupMessageHandler(_context));
-        _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseStoreMessage.MESSAGE_TYPE, new DatabaseStoreMessageHandler(_context));
+        createHandlers();
         _initialized = true;
         _started = System.currentTimeMillis();
@@ -307,6 +307,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         }
     }
+    protected void createHandlers() {
+        _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseLookupMessage.MESSAGE_TYPE, new DatabaseLookupMessageHandler(_context));
+        _context.inNetMessagePool().registerHandlerJobBuilder(DatabaseStoreMessage.MESSAGE_TYPE, new DatabaseStoreMessageHandler(_context));
+    }
     /**
      * Get the routers closest to that key in response to a remote lookup
      */
@@ -359,6 +364,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     }
     public int getKnownRouters() {
+        if (_kb == null) return 0;
         CountRouters count = new CountRouters();
         _kb.getAll(count);
         return count.size();
@@ -368,12 +374,31 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         private int _count;
         public int size() { return _count; }
         public void add(Hash entry) {
+            if (_ds == null) return;
             Object o = _ds.get(entry);
             if (o instanceof RouterInfo)
                 _count++;
         }
     }
+    public int getKnownLeaseSets() {
+        if (_kb == null) return 0;
+        CountLeaseSets count = new CountLeaseSets();
+        _kb.getAll(count);
+        return count.size();
+    }
+    private class CountLeaseSets implements SelectionCollector {
+        private int _count;
+        public int size() { return _count; }
+        public void add(Hash entry) {
+            if (_ds == null) return;
+            Object o = _ds.get(entry);
+            if (o instanceof LeaseSet)
+                _count++;
+        }
+    }
     public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {
         if (!_initialized) return;
         LeaseSet ls = lookupLeaseSetLocally(key);
@@ -820,6 +845,10 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         return 4 * (int)responseTime; // give it up to 4x the average response time
     }
+    public void sendStore(Hash key, DataStructure ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
+        _context.jobQueue().addJob(new StoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
+    }
     public void renderStatusHTML(Writer out) throws IOException {
         StringBuffer buf = new StringBuffer(10*1024);
         buf.append("<h2>Kademlia Network DB Contents</h2>\n");


@@ -27,12 +27,12 @@ import net.i2p.stat.RateStat;
 import net.i2p.util.Log;
 class PeerSelector {
-    private Log _log;
-    private RouterContext _context;
+    protected Log _log;
+    protected RouterContext _context;
     public PeerSelector(RouterContext ctx) {
         _context = ctx;
-        _log = _context.logManager().getLog(PeerSelector.class);
+        _log = _context.logManager().getLog(getClass());
     }
     /**


@@ -22,7 +22,7 @@ import net.i2p.util.Log;
  */
 public class RepublishLeaseSetJob extends JobImpl {
     private Log _log;
-    private final static long REPUBLISH_LEASESET_DELAY = 3*60*1000; // 3 mins
+    private final static long REPUBLISH_LEASESET_DELAY = 5*60*1000;
     private final static long REPUBLISH_LEASESET_TIMEOUT = 60*1000;
     private Hash _dest;
     private KademliaNetworkDatabaseFacade _facade;
@@ -48,14 +48,17 @@ public class RepublishLeaseSetJob extends JobImpl {
                     _log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
                 } else {
                     getContext().statManager().addRateData("netDb.republishLeaseSetCount", 1, 0);
-                    getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT));
+                    _facade.sendStore(_dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT, null);
+                    //getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT));
                 }
             } else {
                 if (_log.shouldLog(Log.WARN))
                     _log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");
             }
-            long republishDelay = getContext().random().nextLong(2*REPUBLISH_LEASESET_DELAY);
-            requeue(republishDelay);
+            if (false) { // floodfill doesnt require republishing
+                long republishDelay = getContext().random().nextLong(2*REPUBLISH_LEASESET_DELAY);
+                requeue(republishDelay);
+            }
             return;
         } else {
             if (_log.shouldLog(Log.INFO))


@@ -61,7 +61,7 @@ class SearchJob extends JobImpl {
      * How long will we give each peer to reply to our search?
      *
      */
-    private static final int PER_PEER_TIMEOUT = 2*1000;
+    private static final int PER_PEER_TIMEOUT = 5*1000;
 
     /**
      * give ourselves 30 seconds to send out the value found to the closest
@@ -96,7 +96,7 @@ class SearchJob extends JobImpl {
         _isLease = isLease;
         _deferredSearches = new ArrayList(0);
         _deferredCleared = false;
-        _peerSelector = new PeerSelector(getContext());
+        _peerSelector = facade.getPeerSelector();
         _startedOn = -1;
         _expiration = getContext().clock().now() + timeoutMs;
         getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
@@ -133,6 +133,8 @@ class SearchJob extends JobImpl {
      *
      */
     protected int getPerPeerTimeoutMs() {
+        if (true)
+            return PER_PEER_TIMEOUT;
         int rv = -1;
         RateStat rs = getContext().statManager().getRate("netDb.successTime");
         if (rs != null)
@@ -576,6 +578,7 @@ class SearchJob extends JobImpl {
     protected class FailedJob extends JobImpl {
         private Hash _peer;
         private boolean _penalizePeer;
+        private long _sentOn;
         public FailedJob(RouterContext enclosingContext, RouterInfo peer) {
             this(enclosingContext, peer, true);
         }
@@ -588,12 +591,14 @@ class SearchJob extends JobImpl {
             super(enclosingContext);
             _penalizePeer = penalizePeer;
             _peer = peer.getIdentity().getHash();
+            _sentOn = enclosingContext.clock().now();
         }
         public void runJob() {
+            if (_state.completed()) return;
             _state.replyTimeout(_peer);
             if (_penalizePeer) {
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("Penalizing peer for timeout on search: " + _peer.toBase64());
+                    _log.warn("Penalizing peer for timeout on search: " + _peer.toBase64() + " after " + (getContext().clock().now() - _sentOn));
                 getContext().profileManager().dbLookupFailed(_peer);
             } else {
                 if (_log.shouldLog(Log.ERROR))
@@ -657,9 +662,7 @@ class SearchJob extends JobImpl {
         if (SHOULD_RESEND_ROUTERINFO) {
             ds = _facade.lookupRouterInfoLocally(_state.getTarget());
             if (ds != null)
-                getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _state.getTarget(),
-                                                            ds, null, null, RESEND_TIMEOUT,
-                                                            _state.getSuccessful()));
+                _facade.sendStore(_state.getTarget(), ds, null, null, RESEND_TIMEOUT, _state.getSuccessful());
         }
     } else {
         Set sendTo = _state.getRepliedPeers(); // _state.getFailed();
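
The FailedJob changes above boil down to a small pattern: remember when the query went out, skip the penalty entirely if the search has already completed, and include the elapsed time in the warning when a peer does get penalized. A self-contained sketch of that pattern (plain Java stand-ins, not the real SearchJob/RouterContext types):

    class TimeoutPenaltySketch {
        private final long _sentOn = System.currentTimeMillis(); // stands in for enclosingContext.clock().now()
        private volatile boolean _completed = false;             // stands in for _state.completed()

        void markCompleted() { _completed = true; }

        void onTimeout(String peerHash) {
            if (_completed) return; // a reply already arrived, so don't penalize anyone
            long waited = System.currentTimeMillis() - _sentOn;
            System.out.println("Penalizing peer for timeout on search: " + peerHash + " after " + waited + " ms");
        }

        public static void main(String[] args) {
            new TimeoutPenaltySketch().onTimeout("base64peerhash");
        }
    }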


@@ -32,7 +32,7 @@ import net.i2p.util.Log;
 class StoreJob extends JobImpl {
     private Log _log;
     private KademliaNetworkDatabaseFacade _facade;
-    private StoreState _state;
+    protected StoreState _state;
     private Job _onSuccess;
     private Job _onFailure;
     private long _timeoutMs;
@@ -82,7 +82,7 @@ class StoreJob extends JobImpl {
         _onFailure = onFailure;
         _timeoutMs = timeoutMs;
         _expiration = context.clock().now() + timeoutMs;
-        _peerSelector = new PeerSelector(context);
+        _peerSelector = facade.getPeerSelector();
     }
 
     public String getName() { return "Kademlia NetDb Store";}
@@ -113,6 +113,9 @@ class StoreJob extends JobImpl {
         }
     }
 
+    protected int getParallelization() { return PARALLELIZATION; }
+    protected int getRedundancy() { return REDUNDANCY; }
+
     /**
      * Send a series of searches to the next available peers as selected by
      * the routing table, but making sure no more than PARALLELIZATION are outstanding
@@ -121,15 +124,15 @@ class StoreJob extends JobImpl {
      */
     private void continueSending() {
         if (_state.completed()) return;
-        int toCheck = PARALLELIZATION - _state.getPending().size();
+        int toCheck = getParallelization() - _state.getPending().size();
         if (toCheck <= 0) {
             // too many already pending
             if (_log.shouldLog(Log.DEBUG))
                 _log.debug(getJobId() + ": Too many store messages pending");
             return;
         }
-        if (toCheck > PARALLELIZATION)
-            toCheck = PARALLELIZATION;
+        if (toCheck > getParallelization())
+            toCheck = getParallelization();
 
         List closestHashes = getClosestRouters(_state.getTarget(), toCheck, _state.getAttempted());
         if ( (closestHashes == null) || (closestHashes.size() <= 0) ) {
@@ -310,7 +313,7 @@ class StoreJob extends JobImpl {
             getContext().profileManager().dbStoreSent(_peer.getIdentity().getHash(), howLong);
             getContext().statManager().addRateData("netDb.ackTime", howLong, howLong);
 
-            if (_state.getCompleteCount() >= REDUNDANCY) {
+            if (_state.getCompleteCount() >= getRedundancy()) {
                 succeed();
             } else {
                 sendNext();
@@ -352,7 +355,7 @@ class StoreJob extends JobImpl {
     /**
      * Send was totally successful
      */
-    private void succeed() {
+    protected void succeed() {
         if (_log.shouldLog(Log.INFO))
             _log.info(getJobId() + ": Succeeded sending key " + _state.getTarget());
         if (_log.shouldLog(Log.DEBUG))
@@ -367,7 +370,7 @@ class StoreJob extends JobImpl {
     /**
      * Send totally failed
      */
-    private void fail() {
+    protected void fail() {
         if (_log.shouldLog(Log.WARN))
             _log.warn(getJobId() + ": Failed sending key " + _state.getTarget());
         if (_log.shouldLog(Log.DEBUG))
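
The point of making _state, succeed(), and fail() protected and of routing the constants through getParallelization()/getRedundancy() is that a subclass can retune the store fan-out without duplicating the send loop. A small sketch of that shape (the class names and numbers below are made up for illustration; the actual floodfill subclass is not part of this excerpt):

    class StoreSketch {
        protected int getParallelization() { return 2; } // placeholder for PARALLELIZATION
        protected int getRedundancy() { return 2; }      // placeholder for REDUNDANCY

        void continueSending(int pending) {
            int toCheck = getParallelization() - pending;
            if (toCheck > getParallelization())
                toCheck = getParallelization();
            System.out.println("sending up to " + toCheck + " stores, need " + getRedundancy() + " acks");
        }
    }

    class FloodfillStoreSketch extends StoreSketch {
        // a hypothetical subclass widening the fan-out for floodfill stores
        protected int getParallelization() { return 4; }
        protected int getRedundancy() { return 3; }

        public static void main(String[] args) {
            new FloodfillStoreSketch().continueSending(1); // prints: sending up to 3 stores, need 3 acks
        }
    }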


@@ -25,6 +25,7 @@ import net.i2p.router.Job;
 import net.i2p.router.JobImpl;
 import net.i2p.router.Router;
 import net.i2p.router.RouterContext;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 import net.i2p.util.Log;
 
 public class CreateRouterInfoJob extends JobImpl {
@@ -53,6 +54,8 @@ public class CreateRouterInfoJob extends JobImpl {
             info.setAddresses(getContext().commSystem().createAddresses());
             Properties stats = getContext().statPublisher().publishStatistics();
             stats.setProperty(RouterInfo.PROP_NETWORK_ID, Router.NETWORK_ID+"");
+            if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()))
+                info.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
             info.setOptions(stats);
             info.setPeers(new HashSet());
             info.setPublished(getCurrentPublishDate(getContext()));
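
Both router-info jobs now advertise the floodfill capability when the router is configured to participate. A hedged sketch of that gating (the property name "router.floodfillParticipant" and the 'f' capability character are assumptions for illustration, not taken from this diff):

    import java.util.Properties;

    class CapabilitySketch {
        static final char CAPACITY_FLOODFILL = 'f'; // assumed value, illustration only

        static boolean floodfillEnabled(Properties config) {
            return Boolean.parseBoolean(config.getProperty("router.floodfillParticipant", "false"));
        }

        public static void main(String[] args) {
            Properties config = new Properties();
            config.setProperty("router.floodfillParticipant", "true");
            StringBuilder capabilities = new StringBuilder();
            if (floodfillEnabled(config))
                capabilities.append(CAPACITY_FLOODFILL);
            System.out.println("published capabilities: " + capabilities); // prints: published capabilities: f
        }
    }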


@@ -25,6 +25,7 @@ import net.i2p.data.SigningPublicKey;
 import net.i2p.router.JobImpl;
 import net.i2p.router.Router;
 import net.i2p.router.RouterContext;
+import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 import net.i2p.util.Log;
 
 /**
@@ -126,6 +127,8 @@ public class RebuildRouterInfoJob extends JobImpl {
             Properties stats = getContext().statPublisher().publishStatistics();
             stats.setProperty(RouterInfo.PROP_NETWORK_ID, ""+Router.NETWORK_ID);
             info.setOptions(stats);
+            if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext()))
+                info.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
             // info.setPeers(new HashSet()); // this would have the trusted peers
             info.setPublished(CreateRouterInfoJob.getCurrentPublishDate(getContext()));


@@ -4,6 +4,7 @@ import net.i2p.data.Hash;
 import net.i2p.data.TunnelId;
 import net.i2p.data.Payload;
 import net.i2p.data.i2np.DataMessage;
+import net.i2p.data.i2np.DatabaseStoreMessage;
 import net.i2p.data.i2np.DeliveryInstructions;
 import net.i2p.data.i2np.I2NPMessage;
 import net.i2p.data.i2np.GarlicMessage;
@@ -109,7 +110,17 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
                     _receiver.receive((GarlicMessage)data);
                     return;
                 } else {
-                    _context.inNetMessagePool().add(data, null, null);
+                    if (data instanceof DatabaseStoreMessage) {
+                        // treat db store explicitly, since we don't want to republish (or flood)
+                        // unnecessarily
+                        DatabaseStoreMessage dsm = (DatabaseStoreMessage)data;
+                        if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
+                            _context.netDb().store(dsm.getKey(), dsm.getLeaseSet());
+                        else
+                            _context.netDb().store(dsm.getKey(), dsm.getRouterInfo());
+                    } else {
+                        _context.inNetMessagePool().add(data, null, null);
+                    }
                     return;
                 }
             case DeliveryInstructions.DELIVERY_MODE_DESTINATION:
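
The new branch above writes LeaseSets and RouterInfos carried in a DatabaseStoreMessage straight into the local netDb, while every other clove still goes through the inbound message pool; that keeps locally delivered stores from being re-published or flooded as a side effect of normal message handling. A compact stand-alone sketch of the dispatch (plain Java stand-ins, not the real I2NP types):

    class DistributorSketch {
        interface Message {}
        static class DatabaseStore implements Message {
            final String key; final boolean isLeaseSet;
            DatabaseStore(String key, boolean isLeaseSet) { this.key = key; this.isLeaseSet = isLeaseSet; }
        }
        static class Data implements Message {}

        static void distribute(Message msg) {
            if (msg instanceof DatabaseStore) {
                DatabaseStore dsm = (DatabaseStore)msg;
                // store directly, bypassing the handlers that could republish/flood
                System.out.println("netDb.store(" + dsm.key + ", " + (dsm.isLeaseSet ? "leaseSet" : "routerInfo") + ")");
            } else {
                // everything else takes the normal inbound path
                System.out.println("inNetMessagePool.add(...)");
            }
        }

        public static void main(String[] args) {
            distribute(new DatabaseStore("abcd", true));
            distribute(new Data());
        }
    }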


@@ -88,9 +88,12 @@ public class TunnelCreatorConfig implements TunnelInfo {
         // H0:1235-->H1:2345-->H2:2345
         StringBuffer buf = new StringBuffer(128);
         if (_isInbound)
-            buf.append("inbound: ");
+            buf.append("inbound");
         else
-            buf.append("outbound: ");
+            buf.append("outbound");
+        if (_destination == null)
+            buf.append(" exploratory");
+        buf.append(": ");
         for (int i = 0; i < _peers.length; i++) {
             buf.append(_peers[i].toBase64().substring(0,4));
             buf.append(':');


@@ -6,6 +6,7 @@ import java.util.HashSet;
 import java.util.List;
 import net.i2p.router.RouterContext;
 import net.i2p.router.TunnelPoolSettings;
+import net.i2p.util.Log;
 
 /**
  * Pick peers randomly out of the not-failing pool, and put them into randomly
@@ -14,15 +15,28 @@ import net.i2p.router.TunnelPoolSettings;
  */
 class ExploratoryPeerSelector extends TunnelPeerSelector {
     public List selectPeers(RouterContext ctx, TunnelPoolSettings settings) {
+        Log l = ctx.logManager().getLog(getClass());
         int length = getLength(ctx, settings);
-        if (length < 0)
+        if (length < 0) {
+            if (l.shouldLog(Log.DEBUG))
+                l.debug("Length requested is zero: " + settings);
             return null;
+        }
-        if (shouldSelectExplicit(settings))
-            return selectExplicit(ctx, settings, length);
+        if (shouldSelectExplicit(settings)) {
+            List rv = selectExplicit(ctx, settings, length);
+            if (l.shouldLog(Log.DEBUG))
+                l.debug("Explicit peers selected: " + rv);
+            return rv;
+        }
+        HashSet exclude = new HashSet(1);
+        exclude.add(ctx.routerHash());
         HashSet matches = new HashSet(length);
-        ctx.profileOrganizer().selectNotFailingPeers(length, null, matches, true);
+        ctx.profileOrganizer().selectNotFailingPeers(length, exclude, matches, false);
+        if (l.shouldLog(Log.DEBUG))
+            l.debug("profileOrganizer.selectNotFailing(" + length + ") found " + matches);
         matches.remove(ctx.routerHash());
         ArrayList rv = new ArrayList(matches);