2005-12-31 jrandom
    * Include a simple torrent creator in the I2PSnark web UI
    * Further streaming lib closing improvements
    * Refactored the load test components to run off live tunnels (though,
      still not safe for normal/anonymous load testing)
@@ -264,7 +264,8 @@ public class SnarkManager implements Snark.CompleteListener {
      * Grab the torrent given the (canonical) filename
      */
     public Snark getTorrent(String filename) { synchronized (_snarks) { return (Snark)_snarks.get(filename); } }
-    public void addTorrent(String filename) {
+    public void addTorrent(String filename) { addTorrent(filename, false); }
+    public void addTorrent(String filename, boolean dontAutoStart) {
         if (!I2PSnarkUtil.instance().connected()) {
             addMessage("Connecting to I2P");
             boolean ok = I2PSnarkUtil.instance().connect();
@@ -317,7 +318,7 @@ public class SnarkManager implements Snark.CompleteListener {
             }
             // ok, snark created, now lets start it up or configure it further
             File f = new File(filename);
-            if (shouldAutoStart()) {
+            if (!dontAutoStart && shouldAutoStart()) {
                 torrent.startTorrent();
                 addMessage("Torrent added and started: '" + f.getName() + "'");
             } else {
@@ -460,6 +461,40 @@ public class SnarkManager implements Snark.CompleteListener {
         }
     }
 
+    private static final String DEFAULT_TRACKERS[] = {
+        "Postman's tracker", "http://YRgrgTLGnbTq2aZOZDJQ~o6Uk5k6TK-OZtx0St9pb0G-5EGYURZioxqYG8AQt~LgyyI~NCj6aYWpPO-150RcEvsfgXLR~CxkkZcVpgt6pns8SRc3Bi-QSAkXpJtloapRGcQfzTtwllokbdC-aMGpeDOjYLd8b5V9Im8wdCHYy7LRFxhEtGb~RL55DA8aYOgEXcTpr6RPPywbV~Qf3q5UK55el6Kex-6VCxreUnPEe4hmTAbqZNR7Fm0hpCiHKGoToRcygafpFqDw5frLXToYiqs9d4liyVB-BcOb0ihORbo0nS3CLmAwZGvdAP8BZ7cIYE3Z9IU9D1G8JCMxWarfKX1pix~6pIA-sp1gKlL1HhYhPMxwyxvuSqx34o3BqU7vdTYwWiLpGM~zU1~j9rHL7x60pVuYaXcFQDR4-QVy26b6Pt6BlAZoFmHhPcAuWfu-SFhjyZYsqzmEmHeYdAwa~HojSbofg0TMUgESRXMw6YThK1KXWeeJVeztGTz25sL8AAAA.i2p/announce.php"
+        , "Orion's tracker", "http://gKik1lMlRmuroXVGTZ~7v4Vez3L3ZSpddrGZBrxVriosCQf7iHu6CIk8t15BKsj~P0JJpxrofeuxtm7SCUAJEr0AIYSYw8XOmp35UfcRPQWyb1LsxUkMT4WqxAT3s1ClIICWlBu5An~q-Mm0VFlrYLIPBWlUFnfPR7jZ9uP5ZMSzTKSMYUWao3ejiykr~mtEmyls6g-ZbgKZawa9II4zjOy-hdxHgP-eXMDseFsrym4Gpxvy~3Fv9TuiSqhpgm~UeTo5YBfxn6~TahKtE~~sdCiSydqmKBhxAQ7uT9lda7xt96SS09OYMsIWxLeQUWhns-C~FjJPp1D~IuTrUpAFcVEGVL-BRMmdWbfOJEcWPZ~CBCQSO~VkuN1ebvIOr9JBerFMZSxZtFl8JwcrjCIBxeKPBmfh~xYh16BJm1BBBmN1fp2DKmZ2jBNkAmnUbjQOqWvUcehrykWk5lZbE7bjJMDFH48v3SXwRuDBiHZmSbsTY6zhGY~GkMQHNGxPMMSIAAAA.i2p/bt"
+        // , "The freak's tracker", "http://mHKva9x24E5Ygfey2llR1KyQHv5f8hhMpDMwJDg1U-hABpJ2NrQJd6azirdfaR0OKt4jDlmP2o4Qx0H598~AteyD~RJU~xcWYdcOE0dmJ2e9Y8-HY51ie0B1yD9FtIV72ZI-V3TzFDcs6nkdX9b81DwrAwwFzx0EfNvK1GLVWl59Ow85muoRTBA1q8SsZImxdyZ-TApTVlMYIQbdI4iQRwU9OmmtefrCe~ZOf4UBS9-KvNIqUL0XeBSqm0OU1jq-D10Ykg6KfqvuPnBYT1BYHFDQJXW5DdPKwcaQE4MtAdSGmj1epDoaEBUa9btQlFsM2l9Cyn1hzxqNWXELmx8dRlomQLlV4b586dRzW~fLlOPIGC13ntPXogvYvHVyEyptXkv890jC7DZNHyxZd5cyrKC36r9huKvhQAmNABT2Y~pOGwVrb~RpPwT0tBuPZ3lHYhBFYmD8y~AOhhNHKMLzea1rfwTvovBMByDdFps54gMN1mX4MbCGT4w70vIopS9yAAAA.i2p/bytemonsoon/announce.php"
+    };
+
+    /** comma delimited list of name=announceURL for the trackers to be displayed */
+    public static final String PROP_TRACKERS = "i2psnark.trackers";
+    /** unordered map of announceURL to name */
+    public Map getTrackers() {
+        HashMap rv = new HashMap();
+        String trackers = _config.getProperty(PROP_TRACKERS);
+        if ( (trackers == null) || (trackers.trim().length() <= 0) )
+            trackers = _context.getProperty(PROP_TRACKERS);
+        if ( (trackers == null) || (trackers.trim().length() <= 0) ) {
+            for (int i = 0; i < DEFAULT_TRACKERS.length; i += 2)
+                rv.put(DEFAULT_TRACKERS[i+1], DEFAULT_TRACKERS[i]);
+        } else {
+            StringTokenizer tok = new StringTokenizer(trackers, ",");
+            while (tok.hasMoreTokens()) {
+                String pair = tok.nextToken();
+                int split = pair.indexOf('=');
+                if (split <= 0)
+                    continue;
+                String name = pair.substring(0, split).trim();
+                String url = pair.substring(split+1).trim();
+                if ( (name.length() > 0) && (url.length() > 0) )
+                    rv.put(url, name);
+            }
+        }
+
+        return rv;
+    }
+
     private static class TorrentFilenameFilter implements FilenameFilter {
         private static final TorrentFilenameFilter _filter = new TorrentFilenameFilter();
         public static TorrentFilenameFilter instance() { return _filter; }
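The getTrackers() method added above parses the i2psnark.trackers property as a comma-delimited list of name=announceURL pairs, falling back to the built-in DEFAULT_TRACKERS when the property is unset. A standalone sketch of that parsing with plain JDK classes follows; the class name and sample property value are illustrative and not part of the commit:

import java.util.HashMap;
import java.util.Map;
import java.util.StringTokenizer;

public class TrackerListParser {
    /** Parse "name=announceURL,name2=announceURL2" into a map of announceURL to name. */
    public static Map parse(String trackers) {
        HashMap rv = new HashMap();
        if (trackers == null)
            return rv;
        StringTokenizer tok = new StringTokenizer(trackers, ",");
        while (tok.hasMoreTokens()) {
            String pair = tok.nextToken();
            int split = pair.indexOf('=');
            if (split <= 0)
                continue; // skip malformed entries with no name
            String name = pair.substring(0, split).trim();
            String url = pair.substring(split + 1).trim();
            if (name.length() > 0 && url.length() > 0)
                rv.put(url, name);
        }
        return rv;
    }

    public static void main(String[] args) {
        // hypothetical value of the i2psnark.trackers property
        Map m = parse("Postman's tracker=http://tracker.example.i2p/announce.php,Other=http://other.example.i2p/a");
        System.out.println(m);
    }
}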
@@ -80,7 +80,7 @@ public class I2PSnarkServlet extends HttpServlet {
 
         out.write(TABLE_FOOTER);
         writeAddForm(out, req);
-        if (false) // seeding needs to register the torrent first (boo, hiss)
+        if (true) // seeding needs to register the torrent first, so we can't start it automatically (boo, hiss)
             writeSeedForm(out, req);
         writeConfigForm(out, req);
         out.write(FOOTER);
@@ -252,8 +252,9 @@ public class I2PSnarkServlet extends HttpServlet {
                 out.write(info.getTorrentData());
                 out.close();
                 _manager.addMessage("Torrent created for " + baseFile.getName() + ": " + torrentFile.getAbsolutePath());
-                // now fire it up and seed away!
-                _manager.addTorrent(torrentFile.getCanonicalPath());
+                // now fire it up, but don't automatically seed it
+                _manager.addTorrent(torrentFile.getCanonicalPath(), false);
+                _manager.addMessage("Many I2P trackers require you to register new torrents before seeding - please do so before starting " + baseFile.getName());
             } catch (IOException ioe) {
                 _manager.addMessage("Error creating a torrent for " + baseFile.getAbsolutePath() + ": " + ioe.getMessage());
             }
@@ -437,11 +438,6 @@ public class I2PSnarkServlet extends HttpServlet {
         out.write("</form>\n</span>\n");
     }
 
-    private static final String DEFAULT_TRACKERS[] = {
-        "Postman's tracker", "http://YRgrgTLGnbTq2aZOZDJQ~o6Uk5k6TK-OZtx0St9pb0G-5EGYURZioxqYG8AQt~LgyyI~NCj6aYWpPO-150RcEvsfgXLR~CxkkZcVpgt6pns8SRc3Bi-QSAkXpJtloapRGcQfzTtwllokbdC-aMGpeDOjYLd8b5V9Im8wdCHYy7LRFxhEtGb~RL55DA8aYOgEXcTpr6RPPywbV~Qf3q5UK55el6Kex-6VCxreUnPEe4hmTAbqZNR7Fm0hpCiHKGoToRcygafpFqDw5frLXToYiqs9d4liyVB-BcOb0ihORbo0nS3CLmAwZGvdAP8BZ7cIYE3Z9IU9D1G8JCMxWarfKX1pix~6pIA-sp1gKlL1HhYhPMxwyxvuSqx34o3BqU7vdTYwWiLpGM~zU1~j9rHL7x60pVuYaXcFQDR4-QVy26b6Pt6BlAZoFmHhPcAuWfu-SFhjyZYsqzmEmHeYdAwa~HojSbofg0TMUgESRXMw6YThK1KXWeeJVeztGTz25sL8AAAA.i2p/announce.php",
-        "Orion's tracker", "http://gKik1lMlRmuroXVGTZ~7v4Vez3L3ZSpddrGZBrxVriosCQf7iHu6CIk8t15BKsj~P0JJpxrofeuxtm7SCUAJEr0AIYSYw8XOmp35UfcRPQWyb1LsxUkMT4WqxAT3s1ClIICWlBu5An~q-Mm0VFlrYLIPBWlUFnfPR7jZ9uP5ZMSzTKSMYUWao3ejiykr~mtEmyls6g-ZbgKZawa9II4zjOy-hdxHgP-eXMDseFsrym4Gpxvy~3Fv9TuiSqhpgm~UeTo5YBfxn6~TahKtE~~sdCiSydqmKBhxAQ7uT9lda7xt96SS09OYMsIWxLeQUWhns-C~FjJPp1D~IuTrUpAFcVEGVL-BRMmdWbfOJEcWPZ~CBCQSO~VkuN1ebvIOr9JBerFMZSxZtFl8JwcrjCIBxeKPBmfh~xYh16BJm1BBBmN1fp2DKmZ2jBNkAmnUbjQOqWvUcehrykWk5lZbE7bjJMDFH48v3SXwRuDBiHZmSbsTY6zhGY~GkMQHNGxPMMSIAAAA.i2p/bt",
-        "The freak's tracker", "http://mHKva9x24E5Ygfey2llR1KyQHv5f8hhMpDMwJDg1U-hABpJ2NrQJd6azirdfaR0OKt4jDlmP2o4Qx0H598~AteyD~RJU~xcWYdcOE0dmJ2e9Y8-HY51ie0B1yD9FtIV72ZI-V3TzFDcs6nkdX9b81DwrAwwFzx0EfNvK1GLVWl59Ow85muoRTBA1q8SsZImxdyZ-TApTVlMYIQbdI4iQRwU9OmmtefrCe~ZOf4UBS9-KvNIqUL0XeBSqm0OU1jq-D10Ykg6KfqvuPnBYT1BYHFDQJXW5DdPKwcaQE4MtAdSGmj1epDoaEBUa9btQlFsM2l9Cyn1hzxqNWXELmx8dRlomQLlV4b586dRzW~fLlOPIGC13ntPXogvYvHVyEyptXkv890jC7DZNHyxZd5cyrKC36r9huKvhQAmNABT2Y~pOGwVrb~RpPwT0tBuPZ3lHYhBFYmD8y~AOhhNHKMLzea1rfwTvovBMByDdFps54gMN1mX4MbCGT4w70vIopS9yAAAA.i2p/bytemonsoon/announce.php"
-    };
     private void writeSeedForm(PrintWriter out, HttpServletRequest req) throws IOException {
         String uri = req.getRequestURI();
         String baseFile = req.getParameter("baseFile");
@@ -453,19 +449,36 @@ public class I2PSnarkServlet extends HttpServlet {
         out.write("<form action=\"" + uri + "\" method=\"POST\">\n");
         out.write("<input type=\"hidden\" name=\"nonce\" value=\"" + _nonce + "\" />\n");
         //out.write("From file: <input type=\"file\" name=\"newFile\" size=\"50\" value=\"" + newFile + "\" /><br />\n");
-        out.write("Data to seed: <input type=\"text\" name=\"baseFile\" size=\"50\" value=\"" + baseFile
-                  + "\" title=\"File within " + _manager.getDataDir().getAbsolutePath() + " to seed\" /><br />\n");
+        out.write("Data to seed: " + _manager.getDataDir().getAbsolutePath() + File.separatorChar
+                  + "<input type=\"text\" name=\"baseFile\" size=\"20\" value=\"" + baseFile
+                  + "\" title=\"File to seed (must be within the specified path)\" /><br />\n");
         out.write("Tracker: <select name=\"announceURL\"><option value=\"\">Select a tracker</option>\n");
-        for (int i = 0; i + 1 < DEFAULT_TRACKERS.length; i += 2)
-            out.write("\t<option value=\"" + DEFAULT_TRACKERS[i+1] + "\">" + DEFAULT_TRACKERS[i] + "</option>\n");
-        out.write("</select><br />\n");
-        out.write(" or: ");
-        out.write("<input type=\"text\" name=\"announceURLOther\" size=\"50\" value=\"http://\" " +
-                  "title=\"Custom tracker URL\" /><br />\n");
+        Map trackers = sort(_manager.getTrackers());
+        for (Iterator iter = trackers.keySet().iterator(); iter.hasNext(); ) {
+            String name = (String)iter.next();
+            String announceURL = (String)trackers.get(name);
+            // we inject whitespace in sort(...) to guarantee uniqueness, but we can strip it off here
+            out.write("\t<option value=\"" + announceURL + "\">" + name.trim() + "</option>\n");
+        }
+        out.write("</select>\n");
+        out.write("or <input type=\"text\" name=\"announceURLOther\" size=\"50\" value=\"http://\" " +
+                  "title=\"Custom tracker URL\" /> ");
         out.write("<input type=\"submit\" value=\"Create torrent\" name=\"action\" />\n");
         out.write("</form>\n</span>\n");
     }
+
+    private Map sort(Map trackers) {
+        TreeMap rv = new TreeMap();
+        for (Iterator iter = trackers.keySet().iterator(); iter.hasNext(); ) {
+            String url = (String)iter.next();
+            String name = (String)trackers.get(url);
+            while (rv.containsKey(name))
+                name = name + " ";
+            rv.put(name, url);
+        }
+        return rv;
+    }
 
     private void writeConfigForm(PrintWriter out, HttpServletRequest req) throws IOException {
         String uri = req.getRequestURI();
         String dataDir = _manager.getDataDir().getAbsolutePath();
@@ -584,7 +597,7 @@ public class I2PSnarkServlet extends HttpServlet {
                    " background-color: #DDDDCC;\n" +
                    "}\n" +
                    ".snarkNewTorrent {\n" +
-                   " font-size: 12pt;\n" +
+                   " font-size: 10pt;\n" +
                    " font-family: monospace;\n" +
                    " background-color: #ADAE9;\n" +
                    "}\n" +
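The sort(Map) helper added above flips the announceURL-to-name map into a TreeMap keyed by display name so the tracker drop-down comes out alphabetized; since a TreeMap cannot hold duplicate keys, repeated names are padded with trailing spaces and later stripped with trim() when rendered. A standalone sketch of that trick with plain JDK classes follows; the class and sample entries are illustrative, not from the commit:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;

public class TrackerSortDemo {
    /** Sort announceURL->name entries by name, padding duplicate names with spaces to keep keys unique. */
    public static Map sortByName(Map urlToName) {
        TreeMap rv = new TreeMap();
        for (Iterator iter = urlToName.keySet().iterator(); iter.hasNext(); ) {
            String url = (String) iter.next();
            String name = (String) urlToName.get(url);
            while (rv.containsKey(name))
                name = name + " "; // whitespace padding guarantees uniqueness; strip with trim() when displaying
            rv.put(name, url);
        }
        return rv;
    }

    public static void main(String[] args) {
        Map m = new HashMap();
        m.put("http://a.example.i2p/announce", "tracker");
        m.put("http://b.example.i2p/announce", "tracker"); // duplicate display name
        Map sorted = sortByName(m);
        for (Iterator iter = sorted.keySet().iterator(); iter.hasNext(); ) {
            String name = (String) iter.next();
            System.out.println("'" + name + "' -> " + sorted.get(name));
        }
    }
}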
@@ -768,7 +768,7 @@ public class Connection {
         long howLong = _options.getInactivityTimeout();
         howLong += _context.random().nextInt(30*1000); // randomize it a bit, so both sides don't do it at once
         if (_log.shouldLog(Log.DEBUG))
-            _log.debug("Resetting the inactivity timer to " + howLong, new Exception("Reset by"));
+            _log.debug("Resetting the inactivity timer to " + howLong, new Exception(toString()));
         // this will get rescheduled, and rescheduled, and rescheduled...
         RetransmissionTimer.getInstance().removeEvent(_activityTimer);
         RetransmissionTimer.getInstance().addEvent(_activityTimer, howLong);
@@ -444,14 +444,20 @@ public class ConnectionPacketHandler {
         }
         public void timeReached() {
             if (_con.getLastSendTime() <= _created) {
-                if (_con.getResetReceived() || _con.getResetSent() || (_con.getUnackedPacketsReceived() <= 0) )
+                if (_con.getResetReceived() || _con.getResetSent()) {
+                    if (_log.shouldLog(Log.DEBUG))
+                        _log.debug("Ack dup on " + _con + ", but we have been reset");
                     return;
+                }
 
                 if (_log.shouldLog(Log.DEBUG))
-                    _log.debug("Last sent was a while ago, and we want to ack a dup");
+                    _log.debug("Last sent was a while ago, and we want to ack a dup on " + _con);
                 // we haven't done anything since receiving the dup, send an
                 // ack now
                 _con.ackImmediately();
+            } else {
+                if (_log.shouldLog(Log.DEBUG))
+                    _log.debug("Ack dup on " + _con + ", but we have sent (" + (_con.getLastSendTime()-_created) + ")");
             }
         }
     }
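The timeReached() change above narrows when a duplicate packet gets a delayed ack: if the connection was reset in either direction the dup is ignored, and if anything was sent since the dup arrived the ack already went out implicitly. A condensed restatement of that decision as a standalone method (illustrative names only, not the streaming library API):

public class DupAckDecision {
    /** Decide whether a delayed ack should be sent for a duplicate packet that arrived at 'created'. */
    public static boolean shouldAckDup(long lastSendTime, long created,
                                       boolean resetReceived, boolean resetSent) {
        if (lastSendTime > created)
            return false; // we already sent something (which carried an ack) since the dup arrived
        if (resetReceived || resetSent)
            return false; // connection is being torn down, no point acking
        return true;      // quiet since the dup arrived: ack it now
    }

    public static void main(String[] args) {
        System.out.println(shouldAckDup(1000L, 2000L, false, false)); // true: nothing sent since the dup
        System.out.println(shouldAckDup(3000L, 2000L, false, false)); // false: we sent after the dup
    }
}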
@@ -1,4 +1,10 @@
-$Id: history.txt,v 1.374 2005/12/30 15:57:53 jrandom Exp $
+$Id: history.txt,v 1.375 2005/12/30 18:33:54 jrandom Exp $
 
+2005-12-31 jrandom
+    * Include a simple torrent creator in the I2PSnark web UI
+    * Further streaming lib closing improvements
+    * Refactored the load test components to run off live tunnels (though,
+      still not safe for normal/anonymous load testing)
+
 2005-12-30 jrandom
     * Close streams more gracefully
@@ -178,29 +178,9 @@ public class InNetMessagePool implements Service {
         }
 
         if (allowMatches) {
-            List origMessages = _context.messageRegistry().getOriginalMessages(messageBody);
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("Original messages for inbound message: " + origMessages.size());
-            if (origMessages.size() > 1) {
-                if (_log.shouldLog(Log.DEBUG))
-                    _log.debug("Orig: " + origMessages + " \nthe above are replies for: " + messageBody,
-                               new Exception("Multiple matches"));
-            }
+            int replies = handleReplies(messageBody);
 
-            for (int i = 0; i < origMessages.size(); i++) {
-                OutNetMessage omsg = (OutNetMessage)origMessages.get(i);
-                ReplyJob job = omsg.getOnReplyJob();
-                if (_log.shouldLog(Log.DEBUG))
-                    _log.debug("Original message [" + i + "] " + omsg.getReplySelector()
-                               + " : " + omsg + ": reply job: " + job);
-
-                if (job != null) {
-                    job.setMessage(messageBody);
-                    _context.jobQueue().addJob(job);
-                }
-            }
-
-            if (origMessages.size() <= 0) {
+            if (replies <= 0) {
                 // not handled as a reply
                 if (!jobFound) {
                     // was not handled via HandlerJobBuilder
@@ -247,6 +227,31 @@ public class InNetMessagePool implements Service {
         return 0; // no queue
     }
 
+    public int handleReplies(I2NPMessage messageBody) {
+        List origMessages = _context.messageRegistry().getOriginalMessages(messageBody);
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("Original messages for inbound message: " + origMessages.size());
+        if (origMessages.size() > 1) {
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Orig: " + origMessages + " \nthe above are replies for: " + messageBody,
+                           new Exception("Multiple matches"));
+        }
+
+        for (int i = 0; i < origMessages.size(); i++) {
+            OutNetMessage omsg = (OutNetMessage)origMessages.get(i);
+            ReplyJob job = omsg.getOnReplyJob();
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Original message [" + i + "] " + omsg.getReplySelector()
+                           + " : " + omsg + ": reply job: " + job);
+
+            if (job != null) {
+                job.setMessage(messageBody);
+                _context.jobQueue().addJob(job);
+            }
+        }
+        return origMessages.size();
+    }
+
     // the following short circuits the tunnel dispatching - i'm not sure whether
     // we'll want to run the dispatching in jobs or whether it shuold go inline with
     // others and/or on other threads (e.g. transport threads). lets try 'em both.
@@ -226,13 +226,8 @@ public class JobQueue {
         return false;
     }
 
-    private static final String PROP_LOAD_TEST = "router.loadTest";
     public void allowParallelOperation() {
         _allowParallelOperation = true;
-        if (Boolean.valueOf(_context.getProperty(PROP_LOAD_TEST, "false")).booleanValue()) {
-            LoadTestManager t = new LoadTestManager(_context);
-            addJob(t.getTestJob());
-        }
     }
 
     public void restart() {
@@ -5,16 +5,19 @@ import java.util.*;
 import net.i2p.util.*;
 import net.i2p.data.*;
 import net.i2p.data.i2np.*;
+import net.i2p.router.message.*;
 import net.i2p.router.tunnel.*;
 import net.i2p.router.tunnel.pool.*;
 import net.i2p.router.transport.udp.UDPTransport;
 
 /**
- * Coordinate some tests of peers to see how much load they can handle. This
- * test is not safe for use in anonymous environments, but should help pinpoint
- * some performance aspects of the live net.
+ * Coordinate some tests of peers to see how much load they can handle. If
+ * TEST_LIVE_TUNNELS is set to false, it builds load test tunnels across various
+ * peers in ways that are not anonymity sensitive (but may help with testing the net).
+ * If it is set to true, however, it runs a few tests at a time for actual tunnels that
+ * are built, to help determine whether our peer selection is insufficient.
 *
- * Each individual load test is conducted by building a single one hop inbound
+ * Load tests of fake tunnels are conducted by building a single one hop inbound
 * tunnel with the peer in question acting as the inbound gateway. We then send
 * messages directly to that gateway, which they batch up and send "down the
 * tunnel" (aka directly to us), at which point we then send another message,
@@ -24,9 +27,15 @@ import net.i2p.router.transport.udp.UDPTransport;
 *
 * If "router.loadTestSmall=true", we transmit a tiny DeliveryStatusMessage (~96 bytes
 * at the SSU level), which is sent back to us as a single TunnelDataMessage (~1KB).
- * Otherwise, we transmit a 4KB DataMessage, which is sent back to us as five (1KB)
- * TunnelDataMessages. This size is chosen because the streaming lib uses 4KB messages
- * by default.
+ * Otherwise, we transmit a 4KB DataMessage wrapped inside a garlic message, which is
+ * sent back to us as five (1KB) TunnelDataMessages. This size is chosen because the
+ * streaming lib uses 4KB messages by default.
+ *
+ * Load tests of live tunnels pick a random tunnel from the tested pool's pair (e.g. if
+ * we are testing an outbound tunnel for a particular destination, it picks an inbound
+ * tunnel from that destination's inbound pool), with each message going down that one
+ * randomly paired tunnel for the duration of the load test (varying the paired tunnel
+ * with each message had poor results)
 *
 */
 public class LoadTestManager {
@@ -34,12 +43,14 @@ public class LoadTestManager {
     private Log _log;
     private Writer _out;
     private List _untestedPeers;
+    private List _active;
     public LoadTestManager(RouterContext ctx) {
         _context = ctx;
         _log = ctx.logManager().getLog(LoadTestManager.class);
+        _active = Collections.synchronizedList(new ArrayList());
         try {
             _out = new BufferedWriter(new FileWriter("loadtest.log", true));
+            _out.write("startup at " + ctx.clock().now() + "\n");
         } catch (IOException ioe) {
             _log.log(Log.CRIT, "error creating log", ioe);
         }
@@ -50,6 +61,8 @@ public class LoadTestManager {
         _context.statManager().createRateStat("test.rttHigh", "How long it takes to get a reply, if it is a slow rtt", "test", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
     }
 
+    public static final boolean TEST_LIVE_TUNNELS = true;
+
     public Job getTestJob() { return new TestJob(_context); }
     private class TestJob extends JobImpl {
         public TestJob(RouterContext ctx) {
@@ -59,14 +72,16 @@ public class LoadTestManager {
         }
         public String getName() { return "run load tests"; }
         public void runJob() {
-            runTest();
-            getTiming().setStartAfter(10*60*1000 + getContext().clock().now());
-            getContext().jobQueue().addJob(TestJob.this);
+            if (!TEST_LIVE_TUNNELS) {
+                runTest();
+                getTiming().setStartAfter(10*60*1000 + getContext().clock().now());
+                getContext().jobQueue().addJob(TestJob.this);
+            }
         }
     }
 
     /** 10 peers at a time */
-    private static final int CONCURRENT_PEERS = 10;
+    private static final int CONCURRENT_PEERS = 0;
     /** 4 messages per peer at a time */
     private static final int CONCURRENT_MESSAGES = 4;
 
@@ -88,8 +103,8 @@ public class LoadTestManager {
         } catch (NumberFormatException nfe) {
             rv = CONCURRENT_PEERS;
         }
-        if (rv < 1)
-            rv = 1;
+        if (rv < 0)
+            rv = 0;
         if (rv > 50)
             rv = 50;
         return rv;
@@ -115,38 +130,119 @@ public class LoadTestManager {
     private void runTest(LoadTestTunnelConfig tunnel) {
         log(tunnel, "start");
         int peerMessages = getPeerMessages();
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("Run test on " + tunnel + " with " + peerMessages + " messages");
         for (int i = 0; i < peerMessages; i++)
             sendTestMessage(tunnel);
     }
 
+    private void pickTunnels(LoadTestTunnelConfig tunnel) {
+        TunnelInfo inbound = null;
+        TunnelInfo outbound = null;
+        if (tunnel.getTunnel().isInbound()) {
+            inbound = _context.tunnelManager().getTunnelInfo(tunnel.getReceiveTunnelId(0));
+            if ( (inbound == null) && (_log.shouldLog(Log.WARN)) )
+                _log.warn("where are we? inbound tunnel isn't known: " + tunnel, new Exception("source"));
+            if (tunnel.getTunnel().getDestination() != null)
+                outbound = _context.tunnelManager().selectOutboundTunnel(tunnel.getTunnel().getDestination());
+            else
+                outbound = _context.tunnelManager().selectOutboundTunnel();
+        } else {
+            outbound = _context.tunnelManager().getTunnelInfo(tunnel.getSendTunnelId(0));
+            if ( (outbound == null) && (_log.shouldLog(Log.WARN)) )
+                _log.warn("where are we? outbound tunnel isn't known: " + tunnel, new Exception("source"));
+            if (tunnel.getTunnel().getDestination() != null)
+                inbound = _context.tunnelManager().selectInboundTunnel(tunnel.getTunnel().getDestination());
+            else
+                inbound = _context.tunnelManager().selectInboundTunnel();
+        }
+        tunnel.setInbound(inbound);
+        tunnel.setOutbound(outbound);
+    }
+
     private void sendTestMessage(LoadTestTunnelConfig tunnel) {
-        if (_context.clock().now() > tunnel.getExpiration())
-            return;
-        RouterInfo target = _context.netDb().lookupRouterInfoLocally(tunnel.getPeer(0));
-        if (target == null) {
-            log(tunnel, "lookup failed");
+        long now = _context.clock().now();
+        if (now > tunnel.getExpiration()) {
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Not sending a test message to " + tunnel + " because it expired");
+            tunnel.logComplete();
+            _active.remove(tunnel);
             return;
         }
 
-        I2NPMessage payloadMessage = createPayloadMessage();
-        TunnelGatewayMessage tm = new TunnelGatewayMessage(_context);
-        tm.setMessage(payloadMessage);
-        tm.setTunnelId(tunnel.getReceiveTunnelId(0));
-        tm.setMessageExpiration(payloadMessage.getMessageExpiration());
-        OutNetMessage om = new OutNetMessage(_context);
-        om.setMessage(tm);
-        SendAgain failed = new SendAgain(_context, tunnel, payloadMessage.getUniqueId(), false);
-        om.setOnFailedReplyJob(failed);
-        om.setOnReplyJob(new SendAgain(_context, tunnel, payloadMessage.getUniqueId(), true));
-        //om.setOnFailedSendJob(failed);
-        om.setReplySelector(new Selector(tunnel, payloadMessage.getUniqueId()));
-        om.setTarget(target);
-        om.setExpiration(tm.getMessageExpiration());
-        om.setPriority(40);
-        _context.outNetMessagePool().add(om);
-        //log(tunnel, m.getMessageId() + " sent");
+        if (TEST_LIVE_TUNNELS) {
+            TunnelInfo inbound = tunnel.getInbound();
+            TunnelInfo outbound = tunnel.getOutbound();
+            if ( (inbound == null) || (outbound == null) ) {
+                pickTunnels(tunnel);
+                inbound = tunnel.getInbound();
+                outbound = tunnel.getOutbound();
+            }
+
+            if (inbound == null) {
+                log(tunnel, "No inbound tunnels found");
+                _active.remove(tunnel);
+                return;
+            } else if (outbound == null) {
+                log(tunnel, "No outbound tunnels found");
+                tunnel.logComplete();
+                _active.remove(tunnel);
+                return;
+            }
+
+            if ( (now >= inbound.getExpiration()) || (now >= outbound.getExpiration()) ) {
+                tunnel.logComplete();
+                _active.remove(tunnel);
+                return;
+            }
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("inbound and outbound found for " + tunnel);
+            I2NPMessage payloadMessage = createPayloadMessage();
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("testing live tunnels with inbound [" + inbound + "] and outbound [" + outbound + "]");
+
+            // this should take into consideration both the inbound and outbound tunnels
+            // ... but it doesn't, yet.
+            _context.messageRegistry().registerPending(new Selector(tunnel, payloadMessage.getUniqueId()),
+                                                       new SendAgain(_context, tunnel, payloadMessage.getUniqueId(), true),
+                                                       new SendAgain(_context, tunnel, payloadMessage.getUniqueId(), false),
+                                                       10*1000);
+            _context.tunnelDispatcher().dispatchOutbound(payloadMessage, outbound.getSendTunnelId(0),
+                                                         inbound.getReceiveTunnelId(0),
+                                                         inbound.getPeer(0));
+            //log(tunnel, payloadMessage.getUniqueId() + " sent via " + inbound + " / " + outbound);
+        } else {
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("NOT testing live tunnels for [" + tunnel + "]");
+            RouterInfo target = _context.netDb().lookupRouterInfoLocally(tunnel.getPeer(0));
+            if (target == null) {
+                log(tunnel, "lookup failed");
+                return;
+            }
+
+            I2NPMessage payloadMessage = createPayloadMessage();
+
+            TunnelGatewayMessage tm = new TunnelGatewayMessage(_context);
+            tm.setMessage(payloadMessage);
+            tm.setTunnelId(tunnel.getReceiveTunnelId(0));
+            tm.setMessageExpiration(payloadMessage.getMessageExpiration());
+
+            OutNetMessage om = new OutNetMessage(_context);
+            om.setMessage(tm);
+            SendAgain failed = new SendAgain(_context, tunnel, payloadMessage.getUniqueId(), false);
+            om.setOnFailedReplyJob(failed);
+            om.setOnReplyJob(new SendAgain(_context, tunnel, payloadMessage.getUniqueId(), true));
+            //om.setOnFailedSendJob(failed);
+            om.setReplySelector(new Selector(tunnel, payloadMessage.getUniqueId()));
+            om.setTarget(target);
+            om.setExpiration(tm.getMessageExpiration());
+            om.setPriority(40);
+            _context.outNetMessagePool().add(om);
+            //log(tunnel, m.getMessageId() + " sent");
+        }
     }
 
     private static final boolean SMALL_PAYLOAD = false;
@@ -172,7 +268,40 @@ public class LoadTestManager {
         m.setData(data);
         long now = _context.clock().now();
         m.setMessageExpiration(now + 10*1000);
-        return m;
+        if (true) {
+            // garlic wrap the data message to ourselves so the endpoints and gateways
+            // can't tell its a test, encrypting it with a random key and tag,
+            // remembering that key+tag so that we can decrypt it later without any ElGamal
+            DeliveryInstructions instructions = new DeliveryInstructions();
+            instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
+
+            PayloadGarlicConfig payload = new PayloadGarlicConfig();
+            payload.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
+            payload.setId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
+            payload.setId(m.getUniqueId());
+            payload.setPayload(m);
+            payload.setRecipient(_context.router().getRouterInfo());
+            payload.setDeliveryInstructions(instructions);
+            payload.setRequestAck(false);
+            payload.setExpiration(m.getMessageExpiration());
+
+            SessionKey encryptKey = _context.keyGenerator().generateSessionKey();
+            SessionTag encryptTag = new SessionTag(true);
+            SessionKey sentKey = new SessionKey();
+            Set sentTags = null;
+            GarlicMessage msg = GarlicMessageBuilder.buildMessage(_context, payload, sentKey, sentTags,
+                                                                  _context.keyManager().getPublicKey(),
+                                                                  encryptKey, encryptTag);
+
+            Set encryptTags = new HashSet(1);
+            encryptTags.add(encryptTag);
+            _context.sessionKeyManager().tagsReceived(encryptKey, encryptTags);
+
+            return msg;
+        } else {
+            return m;
+        }
     }
     }
 
@@ -238,23 +367,66 @@ public class LoadTestManager {
     }
 
     private void log(LoadTestTunnelConfig tunnel, String msg) {
+        //if (!_log.shouldLog(Log.INFO)) return;
         StringBuffer buf = new StringBuffer(128);
-        for (int i = 0; i < tunnel.getLength()-1; i++) {
-            Hash peer = tunnel.getPeer(i);
-            if ( (peer != null) && (peer.equals(_context.routerHash())) )
-                continue;
-            else if (peer != null)
-                buf.append(peer.toBase64());
-            else
-                buf.append("[unknown_peer]");
-            buf.append(" ");
-            TunnelId id = tunnel.getReceiveTunnelId(i);
-            if (id != null)
-                buf.append(id.getTunnelId());
-            else
-                buf.append("[unknown_tunnel]");
-            buf.append(" ");
-            buf.append(_context.clock().now()).append(" hop ").append(i).append(" ").append(msg).append("\n");
+        if (tunnel.getInbound() == null) {
+            for (int i = 0; i < tunnel.getLength()-1; i++) {
+                Hash peer = tunnel.getPeer(i);
+                if ( (peer != null) && (peer.equals(_context.routerHash())) )
+                    continue;
+                else if (peer != null)
+                    buf.append(peer.toBase64());
+                else
+                    buf.append("[unknown_peer]");
+                buf.append(" ");
+                TunnelId id = tunnel.getReceiveTunnelId(i);
+                if (id != null)
+                    buf.append(id.getTunnelId());
+                else
+                    buf.append("[unknown_tunnel]");
+                buf.append(" ");
+                buf.append(_context.clock().now()).append(" hop ").append(i).append(" ").append(msg).append("\n");
+            }
+        } else {
+            int hop = 0;
+            TunnelInfo info = tunnel.getOutbound();
+            for (int i = 0; (info != null) && (i < info.getLength()-1); i++) {
+                Hash peer = info.getPeer(i);
+                if ( (peer != null) && (peer.equals(_context.routerHash())) )
+                    continue;
+                else if (peer != null)
+                    buf.append(peer.toBase64());
+                else
+                    buf.append("[unknown_peer]");
+                buf.append(" ");
+                TunnelId id = tunnel.getReceiveTunnelId(i);
+                if (id != null)
+                    buf.append(id.getTunnelId());
+                else
+                    buf.append("[unknown_tunnel]");
+                buf.append(" ");
+                buf.append(_context.clock().now()).append(" out_hop ").append(hop).append(" ").append(msg).append("\n");
+                hop++;
+            }
+            info = tunnel.getInbound();
+            for (int i = 0; (info != null) && (i < info.getLength()-1); i++) {
+                Hash peer = info.getPeer(i);
+                if ( (peer != null) && (peer.equals(_context.routerHash())) )
+                    continue;
+                else if (peer != null)
+                    buf.append(peer.toBase64());
+                else
+                    buf.append("[unknown_peer]");
+                buf.append(" ");
+                TunnelId id = tunnel.getReceiveTunnelId(i);
+                if (id != null)
+                    buf.append(id.getTunnelId());
+                else
+                    buf.append("[unknown_tunnel]");
+                buf.append(" ");
+                buf.append(_context.clock().now()).append(" in_hop ").append(hop).append(" ").append(msg).append("\n");
+                hop++;
+            }
         }
         try {
             synchronized (_out) {
@@ -280,7 +452,7 @@ public class LoadTestManager {
     private void buildOneHop(Hash peer) {
         long expiration = _context.clock().now() + 10*60*1000;
 
-        LoadTestTunnelConfig cfg = new LoadTestTunnelConfig(_context, 2, true);
+        PooledTunnelCreatorConfig cfg = new PooledTunnelCreatorConfig(_context, 2, true);
         // cfg.getPeer() is ordered gateway first
         cfg.setPeer(0, peer);
         HopConfig hop = cfg.getConfig(0);
@@ -299,8 +471,10 @@ public class LoadTestManager {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Config for " + peer.toBase64() + ": " + cfg);
 
-        CreatedJob onCreated = new CreatedJob(_context, cfg);
-        FailedJob fail = new FailedJob(_context, cfg);
+        LoadTestTunnelConfig ltCfg = new LoadTestTunnelConfig(cfg);
+        CreatedJob onCreated = new CreatedJob(_context, ltCfg);
+        FailedJob fail = new FailedJob(_context, ltCfg);
         RequestTunnelJob req = new RequestTunnelJob(_context, cfg, onCreated, fail, cfg.getLength()-1, false, true);
         _context.jobQueue().addJob(req);
     }
@@ -333,7 +507,7 @@ public class LoadTestManager {
     private void buildLonger(Hash peer) {
         long expiration = _context.clock().now() + 10*60*1000;
 
-        LoadTestTunnelConfig cfg = new LoadTestTunnelConfig(_context, 3, true);
+        PooledTunnelCreatorConfig cfg = new PooledTunnelCreatorConfig(_context, 3, true);
         // cfg.getPeer() is ordered gateway first
         cfg.setPeer(0, peer);
         HopConfig hop = cfg.getConfig(0);
@@ -370,12 +544,61 @@ public class LoadTestManager {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Config for " + peer.toBase64() + " with fastPeer: " + fastPeer.toBase64() + ": " + cfg);
 
-        CreatedJob onCreated = new CreatedJob(_context, cfg);
-        FailedJob fail = new FailedJob(_context, cfg);
+        LoadTestTunnelConfig ltCfg = new LoadTestTunnelConfig(cfg);
+        CreatedJob onCreated = new CreatedJob(_context, ltCfg);
+        FailedJob fail = new FailedJob(_context, ltCfg);
         RequestTunnelJob req = new RequestTunnelJob(_context, cfg, onCreated, fail, cfg.getLength()-1, false, true);
         _context.jobQueue().addJob(req);
     }
+
+    /**
+     * If we are testing live tunnels, see if we want to test the one that was just created
+     * fully.
+     */
+    public void addTunnelTestCandidate(TunnelCreatorConfig cfg) {
+        LoadTestTunnelConfig ltCfg = new LoadTestTunnelConfig(cfg);
+        if (wantToTest(ltCfg)) {
+            // wait briefly so everyone has their things in order (not really necessary...)
+            long delay = _context.random().nextInt(30*1000) + 30*1000;
+            SimpleTimer.getInstance().addEvent(new BeginTest(ltCfg), delay);
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Testing " + cfg + ", with " + _active.size() + " active");
+        } else {
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Not testing " + cfg + " because we have " + _active.size() + " active: " + _active);
+        }
+    }
+    public void removeTunnelTestCandidate(TunnelCreatorConfig cfg) { _active.remove(cfg); }
+
+    private class BeginTest implements SimpleTimer.TimedEvent {
+        private LoadTestTunnelConfig _cfg;
+        public BeginTest(LoadTestTunnelConfig cfg) {
+            _cfg = cfg;
+        }
+        public void timeReached() {
+            _context.jobQueue().addJob(new Expire(_context, _cfg, false));
+            runTest(_cfg);
+        }
+    }
+
+    private boolean wantToTest(LoadTestTunnelConfig cfg) {
+        // wait 10 minutes before testing anything
+        if (_context.router().getUptime() <= 10*60*1000) return false;
+
+        if (TEST_LIVE_TUNNELS && _active.size() < getConcurrency()) {
+            // length == #hops+1 (as it includes the creator)
+            if (cfg.getLength() < 2)
+                return false;
+            // only load test the client tunnels
+            if (cfg.getTunnel().getDestination() == null)
+                return false;
+            _active.add(cfg);
+            return true;
+        } else {
+            return false;
+        }
+    }
+
     private class CreatedJob extends JobImpl {
         private LoadTestTunnelConfig _cfg;
@@ -387,29 +610,33 @@ public class LoadTestManager {
         public void runJob() {
             if (_log.shouldLog(Log.INFO))
                 _log.info("Tunnel created for testing peer " + _cfg.getPeer(0).toBase64());
-            getContext().tunnelDispatcher().joinInbound(_cfg);
+            getContext().tunnelDispatcher().joinInbound(_cfg.getTunnel());
             //log(_cfg, "joined");
+            _active.add(_cfg);
             Expire j = new Expire(getContext(), _cfg);
-            _cfg.setExpireJob(j);
+            //_cfg.setExpireJob(j);
             getContext().jobQueue().addJob(j);
             runTest(_cfg);
         }
     }
     private class Expire extends JobImpl {
         private LoadTestTunnelConfig _cfg;
+        private boolean _removeFromDispatcher;
         public Expire(RouterContext ctx, LoadTestTunnelConfig cfg) {
+            this(ctx, cfg, true);
+        }
+        public Expire(RouterContext ctx, LoadTestTunnelConfig cfg, boolean removeFromDispatcher) {
             super(ctx);
             _cfg = cfg;
+            _removeFromDispatcher = removeFromDispatcher;
             getTiming().setStartAfter(cfg.getExpiration()+60*1000);
         }
         public String getName() { return "expire test tunnel"; }
         public void runJob() {
-            getContext().tunnelDispatcher().remove(_cfg);
-            log(_cfg, "expired after sending " + _cfg.getFullMessageCount() + " / " + _cfg.getFailedMessageCount());
-            getContext().statManager().addRateData("test.lifetimeSuccessful", _cfg.getFullMessageCount(), _cfg.getFailedMessageCount());
-            if (_cfg.getFailedMessageCount() > 0)
-                getContext().statManager().addRateData("test.lifetimeFailed", _cfg.getFailedMessageCount(), _cfg.getFullMessageCount());
+            if (_removeFromDispatcher)
+                getContext().tunnelDispatcher().remove(_cfg.getTunnel());
+            _cfg.logComplete();
+            _active.remove(_cfg);
         }
     }
     private class FailedJob extends JobImpl {
@@ -426,17 +653,46 @@ public class LoadTestManager {
         }
     }
 
-    private class LoadTestTunnelConfig extends PooledTunnelCreatorConfig {
+    private class LoadTestTunnelConfig {
+        private TunnelCreatorConfig _cfg;
         private long _failed;
         private long _fullMessages;
-        public LoadTestTunnelConfig(RouterContext ctx, int length, boolean isInbound) {
-            super(ctx, length, isInbound);
+        private TunnelInfo _testInbound;
+        private TunnelInfo _testOutbound;
+        private boolean _completed;
+        public LoadTestTunnelConfig(TunnelCreatorConfig cfg) {
+            _cfg = cfg;
             _failed = 0;
             _fullMessages = 0;
+            _completed = false;
         }
+
+        public long getExpiration() { return _cfg.getExpiration(); }
+        public Hash getPeer(int peer) { return _cfg.getPeer(peer); }
+        public TunnelId getReceiveTunnelId(int peer) { return _cfg.getReceiveTunnelId(peer); }
+        public TunnelId getSendTunnelId(int peer) { return _cfg.getSendTunnelId(peer); }
+        public int getLength() { return _cfg.getLength(); }
+
         public void incrementFailed() { ++_failed; }
         public long getFailedMessageCount() { return _failed; }
         public void incrementFull() { ++_fullMessages; }
         public long getFullMessageCount() { return _fullMessages; }
+        public TunnelCreatorConfig getTunnel() { return _cfg; }
+        public void setInbound(TunnelInfo info) { _testInbound = info; }
+        public void setOutbound(TunnelInfo info) { _testOutbound = info; }
+        public TunnelInfo getInbound() { return _testInbound; }
+        public TunnelInfo getOutbound() { return _testOutbound; }
+        public String toString() { return _cfg + ": failed=" + _failed + " full=" + _fullMessages; }
+
+        void logComplete() {
+            if (_completed) return;
+            _completed = true;
+            LoadTestTunnelConfig cfg = LoadTestTunnelConfig.this;
+            log(cfg, "expired after sending " + cfg.getFullMessageCount() + " / " + cfg.getFailedMessageCount()
+                     + " in " + (10*60*1000l - (cfg.getExpiration()-_context.clock().now())));
+            _context.statManager().addRateData("test.lifetimeSuccessful", cfg.getFullMessageCount(), cfg.getFailedMessageCount());
+            if (cfg.getFailedMessageCount() > 0)
+                _context.statManager().addRateData("test.lifetimeFailed", cfg.getFailedMessageCount(), cfg.getFullMessageCount());
+        }
     }
 }
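createPayloadMessage() above garlic-wraps the test payload to the local router with a freshly generated session key and tag, then registers that key and tag with the session key manager so the returning message can be decrypted symmetrically, without an ElGamal operation. The sketch below illustrates only that general "remember your own key and tag" idea using plain javax.crypto AES-GCM; it is not the I2P GarlicMessageBuilder or SessionKeyManager API, and every class, field, and method name in it is made up for illustration:

import java.security.SecureRandom;
import java.util.HashMap;
import java.util.Map;
import javax.crypto.Cipher;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;
import javax.crypto.spec.GCMParameterSpec;

public class SelfAddressedWrapDemo {
    // tag (hex) -> session key we generated for ourselves
    private final Map<String, SecretKey> pending = new HashMap<>();
    private final SecureRandom random = new SecureRandom();

    /** Wrap a payload for ourselves: random key + random tag, remembered locally. */
    public byte[][] wrap(byte[] payload) throws Exception {
        KeyGenerator kg = KeyGenerator.getInstance("AES");
        kg.init(128);
        SecretKey key = kg.generateKey();
        byte[] tag = new byte[12];           // doubles as the GCM IV in this toy example
        random.nextBytes(tag);
        pending.put(toHex(tag), key);        // remember key+tag so no asymmetric step is needed later
        Cipher c = Cipher.getInstance("AES/GCM/NoPadding");
        c.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(128, tag));
        return new byte[][] { tag, c.doFinal(payload) };
    }

    /** Unwrap a message that comes back to us: look the key up by tag, decrypt symmetrically. */
    public byte[] unwrap(byte[] tag, byte[] ciphertext) throws Exception {
        SecretKey key = pending.remove(toHex(tag));
        if (key == null) throw new IllegalStateException("unknown tag");
        Cipher c = Cipher.getInstance("AES/GCM/NoPadding");
        c.init(Cipher.DECRYPT_MODE, key, new GCMParameterSpec(128, tag));
        return c.doFinal(ciphertext);
    }

    private static String toHex(byte[] b) {
        StringBuilder buf = new StringBuilder();
        for (int i = 0; i < b.length; i++) buf.append(String.format("%02x", b[i]));
        return buf.toString();
    }

    public static void main(String[] args) throws Exception {
        SelfAddressedWrapDemo demo = new SelfAddressedWrapDemo();
        byte[][] wrapped = demo.wrap("load test payload".getBytes("UTF-8"));
        System.out.println(new String(demo.unwrap(wrapped[0], wrapped[1]), "UTF-8"));
    }
}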
@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
 *
 */
 public class RouterVersion {
-    public final static String ID = "$Revision: 1.321 $ $Date: 2005/12/30 15:57:53 $";
+    public final static String ID = "$Revision: 1.322 $ $Date: 2005/12/30 18:33:54 $";
     public final static String VERSION = "0.6.1.8";
-    public final static long BUILD = 5;
+    public final static long BUILD = 6;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
         System.out.println("Router ID: " + RouterVersion.ID);
@@ -34,6 +34,7 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
         _log = ctx.logManager().getLog(InboundMessageDistributor.class);
         _receiver = new GarlicMessageReceiver(ctx, this, client);
         _context.statManager().createRateStat("tunnel.dropDangerousClientTunnelMessage", "How many tunnel messages come down a client tunnel that we shouldn't expect (lifetime is the 'I2NP type')", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
+        _context.statManager().createRateStat("tunnel.handleLoadClove", "When do we receive load test cloves", "Tunnels", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
     }
 
     public void distribute(I2NPMessage msg, Hash target) {
@@ -65,6 +66,8 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
             // targetting us either implicitly (no target) or explicitly (no tunnel)
             // make sure we don't honor any remote requests directly (garlic instructions, etc)
             if (msg.getType() == GarlicMessage.MESSAGE_TYPE) {
+                // in case we're looking for replies to a garlic message (cough load tests cough)
+                _context.inNetMessagePool().handleReplies(msg);
                 if (_log.shouldLog(Log.DEBUG))
                     _log.debug("received garlic message in the tunnel, parse it out");
                 _receiver.receive((GarlicMessage)msg);
@@ -149,6 +152,11 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
                 if (_log.shouldLog(Log.WARN))
                     _log.warn("Bad store attempt", iae);
             }
+        } else if (data instanceof DataMessage) {
+            // a data message targetting the local router is how we send load tests (real
+            // data messages target destinations)
+            _context.statManager().addRateData("tunnel.handleLoadClove", 1, 0);
+            _context.inNetMessagePool().add(data, null, null);
         } else {
             if ( (_client != null) && (data.getType() != DeliveryStatusMessage.MESSAGE_TYPE) ) {
                 // drop it, since the data we receive shouldn't include other stuff,
@@ -16,8 +16,10 @@ class ClientPeerSelector extends TunnelPeerSelector {
             return null;
         HashSet matches = new HashSet(length);
 
-        if (shouldSelectExplicit(settings))
-            return selectExplicit(ctx, settings, length);
+        if (length > 0) {
+            if (shouldSelectExplicit(settings))
+                return selectExplicit(ctx, settings, length);
+        }
 
         Set exclude = getExclude(ctx, settings.isInbound(), settings.isExploratory());
         ctx.profileOrganizer().selectFastPeers(length, exclude, matches);
@@ -31,5 +33,4 @@ class ClientPeerSelector extends TunnelPeerSelector {
             rv.add(ctx.routerHash());
         return rv;
     }
-
 }
@@ -20,7 +20,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
             return null;
         }
 
-        if (shouldSelectExplicit(settings)) {
+        if (false && shouldSelectExplicit(settings)) {
             List rv = selectExplicit(ctx, settings, length);
             if (l.shouldLog(Log.DEBUG))
                 l.debug("Explicit peers selected: " + rv);
@@ -29,7 +29,7 @@ class OnCreatedJob extends JobImpl {
             getContext().tunnelDispatcher().joinOutbound(_cfg);
         }
 
-        _pool.getManager().buildComplete();
+        _pool.getManager().buildComplete(_cfg);
         _pool.addTunnel(_cfg);
         TestJob testJob = (_cfg.getLength() > 1 ? new TestJob(getContext(), _cfg, _pool) : null);
         RebuildJob rebuildJob = new RebuildJob(getContext(), _cfg, _pool);
@ -1,6 +1,7 @@
|
|||||||
package net.i2p.router.tunnel.pool;
|
package net.i2p.router.tunnel.pool;
|
||||||
|
|
||||||
import java.util.*;
|
import java.util.*;
|
||||||
|
import net.i2p.I2PAppContext;
|
||||||
import net.i2p.data.DataFormatException;
|
import net.i2p.data.DataFormatException;
|
||||||
import net.i2p.data.Hash;
|
import net.i2p.data.Hash;
|
||||||
import net.i2p.router.Router;
|
import net.i2p.router.Router;
|
||||||
@@ -62,9 +63,12 @@ abstract class TunnelPeerSelector {
     }
 
     protected boolean shouldSelectExplicit(TunnelPoolSettings settings) {
+        if (settings.isExploratory()) return false;
         Properties opts = settings.getUnknownOptions();
         if (opts != null) {
             String peers = opts.getProperty("explicitPeers");
+            if (peers == null)
+                peers = I2PAppContext.getGlobalContext().getProperty("explicitPeers");
             if (peers != null)
                 return true;
         }
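The shouldSelectExplicit() change above does two things: exploratory pools never use pinned peers, and the explicitPeers option now falls back from the per-pool settings to a router-wide property. A small self-contained sketch of that lookup order, with plain java.util.Properties standing in for TunnelPoolSettings and the global context:

    import java.util.Properties;

    // Sketch of the lookup order only; the real settings classes are replaced by Properties.
    class ExplicitPeerOptionSketch {
        static boolean shouldSelectExplicit(boolean exploratory,
                                            Properties poolOptions,
                                            Properties routerProps) {
            if (exploratory)
                return false;                  // exploratory tunnels never pin peers
            String peers = null;
            if (poolOptions != null)
                peers = poolOptions.getProperty("explicitPeers");
            if (peers == null)                 // fall back to the router-wide setting
                peers = routerProps.getProperty("explicitPeers");
            return peers != null;
        }

        public static void main(String[] args) {
            Properties router = new Properties();
            router.setProperty("explicitPeers", "peerA,peerB");
            System.out.println(shouldSelectExplicit(false, new Properties(), router)); // true
            System.out.println(shouldSelectExplicit(true, new Properties(), router));  // false
        }
    }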
@@ -77,6 +81,9 @@ abstract class TunnelPeerSelector {
         if (opts != null)
             peers = opts.getProperty("explicitPeers");
 
+        if (peers == null)
+            peers = I2PAppContext.getGlobalContext().getProperty("explicitPeers");
+
         Log log = ctx.logManager().getLog(ClientPeerSelector.class);
         List rv = new ArrayList();
         StringTokenizer tok = new StringTokenizer(peers, ",");
@@ -90,8 +97,8 @@ abstract class TunnelPeerSelector {
                 if (ctx.profileOrganizer().isSelectable(peer)) {
                     rv.add(peer);
                 } else {
-                    if (log.shouldLog(Log.WARN))
-                        log.warn("Explicit peer is not selectable: " + peerStr);
+                    if (log.shouldLog(Log.DEBUG))
+                        log.debug("Explicit peer is not selectable: " + peerStr);
                 }
             } catch (DataFormatException dfe) {
                 if (log.shouldLog(Log.ERROR))
@@ -99,19 +106,34 @@ abstract class TunnelPeerSelector {
                 }
             }
 
+            int sz = rv.size();
             Collections.shuffle(rv, ctx.random());
 
-
             while (rv.size() > length)
                 rv.remove(0);
 
+            if (log.shouldLog(Log.INFO)) {
+                StringBuffer buf = new StringBuffer();
+                if (settings.getDestinationNickname() != null)
+                    buf.append("peers for ").append(settings.getDestinationNickname());
+                else if (settings.getDestination() != null)
+                    buf.append("peers for ").append(settings.getDestination().toBase64());
+                else
+                    buf.append("peers for exploratory ");
+                if (settings.isInbound())
+                    buf.append(" inbound");
+                else
+                    buf.append(" outbound");
+                buf.append(" peers: ").append(rv);
+                buf.append(", out of ").append(sz).append(" (not including self)");
+                log.info(buf.toString());
+            }
+
             if (settings.isInbound())
                 rv.add(0, ctx.routerHash());
             else
                 rv.add(ctx.routerHash());
 
-            if (log.shouldLog(Log.INFO))
-                log.info(toString() + ": Selecting peers explicitly: " + rv);
             return rv;
         }
 
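Taken together, the selectExplicit() hunks above parse the comma-separated explicitPeers list, skip peers that are not selectable, shuffle the candidates, trim to the requested hop count, and put the local router first for inbound tunnels or last for outbound ones, while the old one-line INFO log is replaced by the more detailed summary. A simplified, runnable sketch of that flow (String ids stand in for router Hashes, java.util.Random for the router's RNG):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;
    import java.util.StringTokenizer;

    // Simplified model of the explicit-peer selection flow; not the router's actual types.
    class ExplicitSelectionSketch {
        static List<String> selectExplicit(String explicitPeers, int length,
                                           boolean inbound, String localRouter, Random rnd) {
            List<String> rv = new ArrayList<String>();
            StringTokenizer tok = new StringTokenizer(explicitPeers, ",");
            while (tok.hasMoreTokens()) {
                String peer = tok.nextToken().trim();
                if (peer.length() > 0)          // the real code also checks isSelectable(peer)
                    rv.add(peer);
            }
            Collections.shuffle(rv, rnd);       // randomize the candidate order
            while (rv.size() > length)          // keep only as many hops as requested
                rv.remove(0);
            if (inbound)
                rv.add(0, localRouter);         // inbound: local router goes first in the list
            else
                rv.add(localRouter);            // outbound: local router goes last
            return rv;
        }

        public static void main(String[] args) {
            System.out.println(selectExplicit("peerA,peerB,peerC", 2, true, "me", new Random(42)));
        }
    }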
@@ -17,11 +17,13 @@ import net.i2p.stat.RateStat;
 import net.i2p.router.ClientTunnelSettings;
 import net.i2p.router.HandlerJobBuilder;
 import net.i2p.router.JobImpl;
+import net.i2p.router.LoadTestManager;
 import net.i2p.router.RouterContext;
 import net.i2p.router.TunnelInfo;
 import net.i2p.router.TunnelManagerFacade;
 import net.i2p.router.TunnelPoolSettings;
 import net.i2p.router.tunnel.HopConfig;
+import net.i2p.router.tunnel.TunnelCreatorConfig;
 import net.i2p.util.Log;
 
 /**
@@ -40,6 +42,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
     private int _outstandingBuilds;
     /** max # of concurrent build requests */
     private int _maxOutstandingBuilds;
+    private LoadTestManager _loadTestManager;
 
     private static final String PROP_MAX_OUTSTANDING_BUILDS = "router.tunnel.maxConcurrentBuilds";
     private static final int DEFAULT_MAX_OUTSTANDING_BUILDS = 20;
@@ -70,6 +73,8 @@ public class TunnelPoolManager implements TunnelManagerFacade {
             }
         }
 
+        _loadTestManager = new LoadTestManager(_context);
+
         ctx.statManager().createRateStat("tunnel.testSuccessTime",
                                          "How long do successful tunnel tests take?", "Tunnels",
                                          new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
@@ -139,6 +144,10 @@ public class TunnelPoolManager implements TunnelManagerFacade {
                 return info;
             }
         }
+        info = _inboundExploratory.getTunnel(id);
+        if (info != null) return info;
+        info = _outboundExploratory.getTunnel(id);
+        if (info != null) return info;
         return null;
     }
 
@@ -332,6 +341,10 @@ public class TunnelPoolManager implements TunnelManagerFacade {
         return rv.booleanValue();
     }
 
+    void buildComplete(TunnelCreatorConfig cfg) {
+        buildComplete();
+        _loadTestManager.addTunnelTestCandidate(cfg);
+    }
     void buildComplete() {
         synchronized (this) {
             if (_outstandingBuilds > 0)
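The new buildComplete(TunnelCreatorConfig) overload above keeps the existing bookkeeping (releasing one outstanding-build slot via the no-arg buildComplete()) and then offers the freshly built tunnel to the load-test manager as a test candidate. A stubbed illustration of that delegation, with placeholder names rather than the router's API:

    // Illustrative delegation pattern only; LoadTestManager's real interface is richer.
    class BuildCompletionSketch {
        interface TestCandidateSink { void addTunnelTestCandidate(Object tunnelConfig); }

        private int outstandingBuilds;
        private final TestCandidateSink loadTester;

        BuildCompletionSketch(TestCandidateSink loadTester) { this.loadTester = loadTester; }

        /** New overload: release the build slot, then offer the tunnel for load testing. */
        void buildComplete(Object tunnelConfig) {
            buildComplete();
            loadTester.addTunnelTestCandidate(tunnelConfig);
        }

        /** Pre-existing behavior: just release one outstanding-build slot. */
        synchronized void buildComplete() {
            if (outstandingBuilds > 0)
                outstandingBuilds--;
        }
    }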
@@ -339,6 +352,9 @@ public class TunnelPoolManager implements TunnelManagerFacade {
         }
     }
 
+
+    private static final String PROP_LOAD_TEST = "router.loadTest";
+
     public void startup() {
         TunnelBuilder builder = new TunnelBuilder();
         ExploratoryPeerSelector selector = new ExploratoryPeerSelector();
@@ -359,6 +375,10 @@ public class TunnelPoolManager implements TunnelManagerFacade {
         // try to build up longer tunnels
         _context.jobQueue().addJob(new BootstrapPool(_context, _inboundExploratory));
         _context.jobQueue().addJob(new BootstrapPool(_context, _outboundExploratory));
+
+        if (Boolean.valueOf(_context.getProperty(PROP_LOAD_TEST, "true")).booleanValue()) {
+            _context.jobQueue().addJob(_loadTestManager.getTestJob());
+        }
     }
 
     private class BootstrapPool extends JobImpl {
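The startup() change above gates load testing on the router.loadTest property, which defaults to enabled: only if the property parses as true is the LoadTestManager's test job scheduled. A tiny sketch of that opt-out check, modeling the context property lookup with java.util.Properties:

    import java.util.Properties;

    // Minimal sketch of the opt-out gate; RouterContext.getProperty is modeled with Properties.
    class LoadTestGateSketch {
        static boolean loadTestEnabled(Properties routerProps) {
            // same idiom as the diff: a missing property defaults to "true"
            return Boolean.valueOf(routerProps.getProperty("router.loadTest", "true")).booleanValue();
        }

        public static void main(String[] args) {
            Properties p = new Properties();
            System.out.println(loadTestEnabled(p));          // true (default)
            p.setProperty("router.loadTest", "false");
            System.out.println(loadTestEnabled(p));          // false
        }
    }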