merge of '25ef664ae94fb608b457b601780f6774ede7b39a'
and 'c71ada3c23327bd4f17070d019a448c0289bae63'
z3d
2009-07-04 02:52:25 +00:00
141 changed files with 4124 additions and 1181 deletions

View File

@ -166,6 +166,8 @@ public class Blocklist {
*/
private void readBlocklistFile(String file) {
File BLFile = new File(file);
if (!BLFile.isAbsolute())
BLFile = new File(_context.getConfigDir(), file);
if (BLFile == null || (!BLFile.exists()) || BLFile.length() <= 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("Blocklist file not found: " + file);
@ -701,6 +703,8 @@ public class Blocklist {
private synchronized void shitlistForever(Hash peer) {
String file = _context.getProperty(PROP_BLOCKLIST_FILE, BLOCKLIST_FILE_DEFAULT);
File BLFile = new File(file);
if (!BLFile.isAbsolute())
BLFile = new File(_context.getConfigDir(), file);
if (BLFile == null || (!BLFile.exists()) || BLFile.length() <= 0) {
if (_log.shouldLog(Log.ERROR))
_log.error("Blocklist file not found: " + file);

View File

@ -36,6 +36,7 @@ public abstract class CommSystemFacade implements Service {
public int countActiveSendPeers() { return 0; }
public boolean haveInboundCapacity() { return true; }
public boolean haveOutboundCapacity() { return true; }
public boolean haveHighOutboundCapacity() { return true; }
public List getMostRecentErrorMessages() { return Collections.EMPTY_LIST; }
/**
@ -62,6 +63,7 @@ public abstract class CommSystemFacade implements Service {
public byte[] getIP(Hash dest) { return null; }
public void queueLookup(byte[] ip) {}
public String getCountry(Hash peer) { return null; }
public String getCountryName(String code) { return code; }
public String renderPeerHTML(Hash peer) { return null; }
/**

View File

@ -669,9 +669,9 @@ public class JobQueue {
/** render the HTML for the job stats */
private void getJobStats(StringBuffer buf) {
buf.append("<table border=\"1\">\n");
buf.append("<tr><td><b>Job</b></td><td><b>Runs</b></td>");
buf.append("<td><b>Time</b></td><td><b><i>Avg</i></b></td><td><b><i>Max</i></b></td><td><b><i>Min</i></b></td>");
buf.append("<td><b>Pending</b></td><td><b><i>Avg</i></b></td><td><b><i>Max</i></b></td><td><b><i>Min</i></b></td></tr>\n");
buf.append("<tr><th>Job</th><th>Runs</th>");
buf.append("<th>Time</th><th><i>Avg</i></th><th><i>Max</i></th><th><i>Min</i></th>");
buf.append("<th>Pending</th><th><i>Avg</i></th><th><i>Max</i></th><th><i>Min</i></th></tr>\n");
long totRuns = 0;
long totExecTime = 0;
long avgExecTime = 0;

View File

@ -147,10 +147,8 @@ public class KeyManager {
super(KeyManager.this._context);
}
public void runJob() {
String keyDir = getContext().getProperty(PROP_KEYDIR);
if (keyDir == null)
keyDir = DEFAULT_KEYDIR;
File dir = new File(keyDir);
String keyDir = getContext().getProperty(PROP_KEYDIR, DEFAULT_KEYDIR);
File dir = new File(getContext().getRouterDir(), keyDir);
if (!dir.exists())
dir.mkdirs();
if (dir.exists() && dir.isDirectory() && dir.canRead() && dir.canWrite()) {

View File

@ -58,6 +58,8 @@ public abstract class NetworkDatabaseFacade implements Service {
public abstract Set<Hash> getAllRouters();
public int getKnownRouters() { return 0; }
public int getKnownLeaseSets() { return 0; }
public boolean isInitialized() { return true; }
public void rescan() {}
public void renderRouterInfoHTML(Writer out, String s) throws IOException {}
public void renderLeaseSetHTML(Writer out) throws IOException {}
public void renderStatusHTML(Writer out, boolean b) throws IOException {}

View File

@ -44,6 +44,7 @@ import net.i2p.util.I2PThread;
import net.i2p.util.Log;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
import net.i2p.util.WorkingDir;
/**
* Main driver for the router.
@ -53,6 +54,7 @@ public class Router {
private Log _log;
private RouterContext _context;
private final Properties _config;
/** full path */
private String _configFilename;
private RouterInfo _routerInfo;
private long _started;
@ -104,14 +106,6 @@ public class Router {
public Router(Properties envProps) { this(null, envProps); }
public Router(String configFilename) { this(configFilename, null); }
public Router(String configFilename, Properties envProps) {
if (!beginMarkingLiveliness(envProps)) {
System.err.println("ERROR: There appears to be another router already running!");
System.err.println(" Please make sure to shut down old instances before starting up");
System.err.println(" a new one. If you are positive that no other instance is running,");
System.err.println(" please delete the file " + getPingFile(envProps));
System.exit(-1);
}
_gracefulExitCode = -1;
_config = new Properties();
@ -125,7 +119,35 @@ public class Router {
_configFilename = configFilename;
}
// we need the user directory figured out by now, so figure it out here rather than
// in the RouterContext() constructor.
//
// Fixme read config file before migration? or after? or both?
//
// Then add it to envProps (but not _config, we don't want it in the router.config file)
// where it will then be available to all via _context.dir()
//
// This call also migrates all files to the new working directory,
// including router.config
//
// Do we copy all the data files to the new directory? default false
String migrate = System.getProperty("i2p.dir.migrate");
boolean migrateFiles = Boolean.valueOf(migrate).booleanValue();
String userDir = WorkingDir.getWorkingDir(migrateFiles);
// Use the router.config file specified in the router.configLocation property
// (default "router.config"),
// if it is an absolute path, otherwise look in the userDir returned by getWorkingDir
// replace relative path with absolute
File cf = new File(_configFilename);
if (!cf.isAbsolute()) {
cf = new File(userDir, _configFilename);
_configFilename = cf.getAbsolutePath();
}
readConfig();
if (envProps == null) {
envProps = _config;
} else {
@ -135,11 +157,42 @@ public class Router {
envProps.setProperty(k, v);
}
}
// This doesn't work, guess it has to be in the static block above?
// if (Boolean.valueOf(envProps.getProperty("router.disableIPv6")).booleanValue())
// System.setProperty("java.net.preferIPv4Stack", "true");
if (envProps.getProperty("i2p.dir.config") == null)
envProps.setProperty("i2p.dir.config", userDir);
// The important thing that happens here is the directory paths are set and created
// i2p.dir.router defaults to i2p.dir.config
// i2p.dir.app defaults to i2p.dir.router
// i2p.dir.log defaults to i2p.dir.router
// i2p.dir.pid defaults to i2p.dir.router
// i2p.dir.base defaults to user.dir == $CWD
_context = new RouterContext(this, envProps);
// This is here so that we can get the directory location from the context
// for the ping file
if (!beginMarkingLiveliness()) {
System.err.println("ERROR: There appears to be another router already running!");
System.err.println(" Please make sure to shut down old instances before starting up");
System.err.println(" a new one. If you are positive that no other instance is running,");
System.err.println(" please delete the file " + getPingFile().getAbsolutePath());
System.exit(-1);
}
// This is here so that we can get the directory location from the context
// for the zip file and the base location to unzip to.
// If it does an update, it never returns.
// I guess it's better to have the other-router check above this, we don't want to
// overwrite an existing running router's jar files. Other than ours.
installUpdates();
// NOW we start all the activity
_context.initAll();
_routerInfo = null;
_higherVersionSeen = false;
_log = _context.logManager().getLog(Router.class);
@ -245,6 +298,7 @@ public class Router {
_context.keyManager().startup();
// why are we reading this again, it's read in the constructor
readConfig();
setupHandlers();
@ -285,6 +339,7 @@ public class Router {
}
}
/** this does not use ctx.getConfigDir(), must provide a full path in filename */
private static Properties getConfig(RouterContext ctx, String filename) {
Log log = null;
if (ctx != null) {
@ -456,11 +511,11 @@ public class Router {
};
static final String IDENTLOG = "identlog.txt";
public static void killKeys() {
public void killKeys() {
new Exception("Clearing identity files").printStackTrace();
int remCount = 0;
for (int i = 0; i < _rebuildFiles.length; i++) {
File f = new File(_rebuildFiles[i]);
File f = new File(_context.getRouterDir(),_rebuildFiles[i]);
if (f.exists()) {
boolean removed = f.delete();
if (removed) {
@ -474,7 +529,7 @@ public class Router {
if (remCount > 0) {
FileOutputStream log = null;
try {
log = new FileOutputStream(IDENTLOG, true);
log = new FileOutputStream(new File(_context.getRouterDir(), IDENTLOG), true);
log.write((new Date() + ": Old router identity keys cleared\n").getBytes());
} catch (IOException ioe) {
// ignore
@ -814,8 +869,9 @@ public class Router {
try { _context.messageValidator().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the message validator", t); }
try { _context.inNetMessagePool().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the inbound net pool", t); }
//try { _sessionKeyPersistenceHelper.shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the session key manager", t); }
_context.deleteTempDir();
RouterContext.listContexts().remove(_context);
dumpStats();
//dumpStats();
finalShutdown(exitCode);
}
@ -833,7 +889,7 @@ public class Router {
killKeys();
}
File f = new File(getPingFile());
File f = getPingFile();
f.delete();
if (_killVMOnEnd) {
try { Thread.sleep(1000); } catch (InterruptedException ie) {}
@ -1003,8 +1059,9 @@ public class Router {
public static void main(String args[]) {
System.out.println("Starting I2P " + RouterVersion.FULL_VERSION);
installUpdates();
verifyWrapperConfig();
// installUpdates() moved to constructor so we can get file locations from the context
// installUpdates();
//verifyWrapperConfig();
Router r = new Router();
if ( (args != null) && (args.length == 1) && ("rebuild".equals(args[0])) ) {
r.rebuildNewIdentity();
@ -1015,19 +1072,51 @@ public class Router {
public static final String UPDATE_FILE = "i2pupdate.zip";
private static void installUpdates() {
File updateFile = new File(UPDATE_FILE);
if (updateFile.exists()) {
/**
* Unzip update file found in the router dir OR base dir, to the base dir
*
* If we can't write to the base dir, complain.
* Note: _log not available here.
*/
private void installUpdates() {
File updateFile = new File(_context.getRouterDir(), UPDATE_FILE);
boolean exists = updateFile.exists();
if (!exists) {
updateFile = new File(_context.getBaseDir(), UPDATE_FILE);
exists = updateFile.exists();
}
if (exists) {
// do a simple permissions test, if it fails leave the file in place and don't restart
File test = new File(_context.getBaseDir(), "history.txt");
if ((test.exists() && !test.canWrite()) || (!_context.getBaseDir().canWrite())) {
System.out.println("ERROR: No write permissions on " + _context.getBaseDir() +
" to extract software update file");
// carry on
return;
}
System.out.println("INFO: Update file exists [" + UPDATE_FILE + "] - installing");
boolean ok = FileUtil.extractZip(updateFile, new File("."));
boolean ok = FileUtil.extractZip(updateFile, _context.getBaseDir());
if (ok)
System.out.println("INFO: Update installed");
else
System.out.println("ERROR: Update failed!");
boolean deleted = updateFile.delete();
if (!deleted) {
System.out.println("ERROR: Unable to delete the update file!");
updateFile.deleteOnExit();
if (!ok) {
// we can't leave the file in place or we'll continually restart, so rename it
File bad = new File(_context.getRouterDir(), "BAD-" + UPDATE_FILE);
boolean renamed = updateFile.renameTo(bad);
if (renamed) {
System.out.println("Moved update file to " + bad.getAbsolutePath());
} else {
System.out.println("Deleting file " + updateFile.getAbsolutePath());
ok = true; // so it will be deleted
}
}
if (ok) {
boolean deleted = updateFile.delete();
if (!deleted) {
System.out.println("ERROR: Unable to delete the update file!");
updateFile.deleteOnExit();
}
}
if (System.getProperty("wrapper.version") != null)
System.out.println("INFO: Restarting after update");
@ -1037,6 +1126,7 @@ public class Router {
}
}
/*******
private static void verifyWrapperConfig() {
File cfgUpdated = new File("wrapper.config.updated");
if (cfgUpdated.exists()) {
@ -1046,15 +1136,22 @@ public class Router {
System.exit(EXIT_HARD);
}
}
*******/
/*
private static String getPingFile(Properties envProps) {
if (envProps != null)
return envProps.getProperty("router.pingFile", "router.ping");
else
return "router.ping";
}
private String getPingFile() {
return _context.getProperty("router.pingFile", "router.ping");
*/
private File getPingFile() {
String s = _context.getProperty("router.pingFile", "router.ping");
File f = new File(s);
if (!f.isAbsolute())
f = new File(_context.getPIDDir(), s);
return f;
}
static final long LIVELINESS_DELAY = 60*1000;
@ -1066,9 +1163,8 @@ public class Router {
*
* @return true if the router is the only one running
*/
private boolean beginMarkingLiveliness(Properties envProps) {
String filename = getPingFile(envProps);
File f = new File(filename);
private boolean beginMarkingLiveliness() {
File f = getPingFile();
if (f.exists()) {
long lastWritten = f.lastModified();
if (System.currentTimeMillis()-lastWritten > LIVELINESS_DELAY) {
@ -1376,15 +1472,14 @@ private static class PersistRouterInfoJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Persisting updated router info");
String infoFilename = getContext().getProperty(Router.PROP_INFO_FILENAME);
if (infoFilename == null)
infoFilename = Router.PROP_INFO_FILENAME_DEFAULT;
String infoFilename = getContext().getProperty(PROP_INFO_FILENAME, PROP_INFO_FILENAME_DEFAULT);
File infoFile = new File(getContext().getRouterDir(), infoFilename);
RouterInfo info = getContext().router().getRouterInfo();
FileOutputStream fos = null;
try {
fos = new FileOutputStream(infoFilename);
fos = new FileOutputStream(infoFile);
info.writeBytes(fos);
} catch (DataFormatException dfe) {
_log.error("Error rebuilding the router information", dfe);

View File

@ -70,7 +70,11 @@ public class RouterContext extends I2PAppContext {
public RouterContext(Router router, Properties envProps) {
super(filterProps(envProps));
_router = router;
initAll();
// Disabled here so that the router can get a context and get the
// directory locations from it, to do an update, without having
// to init everything. Caller MUST call initAll() afterwards.
// Sorry, this breaks some main() unit tests out there.
//initAll();
_contexts.add(this);
}
/**
@ -86,7 +90,7 @@ public class RouterContext extends I2PAppContext {
envProps.setProperty("time.disabled", "false");
return envProps;
}
private void initAll() {
public void initAll() {
//_adminManager = new AdminManager(this);
if ("false".equals(getProperty("i2p.dummyClientFacade", "false")))
_clientManagerFacade = new ClientManagerFacadeImpl(this);
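Since initAll() is no longer called from the constructor, callers follow a two-step pattern; a minimal sketch of the new contract (as used by Router above):

    RouterContext ctx = new RouterContext(router, envProps);
    File base = ctx.getBaseDir();   // directory getters are usable before initAll()
    ctx.initAll();                  // required before normal router operation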

View File

@ -1,13 +1,39 @@
package net.i2p.router;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
/**
* This is the class called by the runplain.sh script on linux
* and the i2p.exe launcher on Windows.
* (i.e. no wrapper)
*
* If there is no -Dwrapper.logfile=/path/to/wrapper.log on the java command line
* to specify a log file, check for existence of wrapper.log in CWD,
* for backward compatibility in old installations (don't move it).
* Otherwise, use (system temp dir)/wrapper.log.
* Create if it doesn't exist, and append to it if it does.
* Put the location in the environment as an absolute path, so logs.jsp can find it.
*/
public class RouterLaunch {
private static final String PROP_WRAPPER_LOG = "wrapper.logfile";
private static final String DEFAULT_WRAPPER_LOG = "wrapper.log";
public static void main(String args[]) {
String path = System.getProperty(PROP_WRAPPER_LOG);
File logfile;
if (path != null) {
logfile = new File(path);
} else {
logfile = new File(DEFAULT_WRAPPER_LOG);
if (!logfile.exists())
logfile = new File(System.getProperty("java.io.tmpdir"), DEFAULT_WRAPPER_LOG);
}
System.setProperty(PROP_WRAPPER_LOG, logfile.getAbsolutePath());
try {
System.setOut(new PrintStream(new FileOutputStream("wrapper.log")));
System.setOut(new PrintStream(new FileOutputStream(logfile, true)));
} catch (IOException ioe) {
ioe.printStackTrace();
}
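A hedged usage sketch of the new log handling: pinning the wrapper log location programmatically, equivalent to passing -Dwrapper.logfile=... on the command line (the path is illustrative):

    System.setProperty("wrapper.logfile", "/var/log/i2p/wrapper.log"); // illustrative path
    net.i2p.router.RouterLaunch.main(new String[0]);                   // goes on to start the router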

View File

@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 1;
public final static long BUILD = 2;
/** for example "-test" */
public final static String EXTRA = "";
public final static String FULL_VERSION = VERSION + "-" + BUILD + EXTRA;

View File

@ -87,14 +87,9 @@ public class StatisticsManager implements Service {
if (_includePeerRankings) {
long publishedUptime = _context.router().getUptime();
boolean commentOutIn074 = RouterVersion.VERSION.equals("0.7.3");
// Don't publish these for first hour
if (publishedUptime > 62*60*1000) {
if (commentOutIn074)
includeThroughput(stats);
else
includeAverageThroughput(stats);
}
if (publishedUptime > 62*60*1000)
includeAverageThroughput(stats);
//includeRate("router.invalidMessageTime", stats, new long[] { 10*60*1000 });
//includeRate("router.duplicateMessageId", stats, new long[] { 24*60*60*1000 });
//includeRate("tunnel.duplicateIV", stats, new long[] { 24*60*60*1000 });
@ -144,12 +139,18 @@ public class StatisticsManager implements Service {
//includeRate("tunnel.buildRequestTime", stats, new long[] { 10*60*1000 });
long rate = 60*60*1000;
includeRate("tunnel.buildClientExpire", stats, new long[] { rate });
includeRate("tunnel.buildClientReject", stats, new long[] { rate });
includeRate("tunnel.buildClientSuccess", stats, new long[] { rate });
includeRate("tunnel.buildExploratoryExpire", stats, new long[] { rate });
includeRate("tunnel.buildExploratoryReject", stats, new long[] { rate });
includeRate("tunnel.buildExploratorySuccess", stats, new long[] { rate });
boolean commentOutIn076 = RouterVersion.VERSION.equals("0.7.5");
if (commentOutIn076) {
includeRate("tunnel.buildClientExpire", stats, new long[] { rate });
includeRate("tunnel.buildClientReject", stats, new long[] { rate });
includeRate("tunnel.buildClientSuccess", stats, new long[] { rate });
includeRate("tunnel.buildExploratoryExpire", stats, new long[] { rate });
includeRate("tunnel.buildExploratoryReject", stats, new long[] { rate });
includeRate("tunnel.buildExploratorySuccess", stats, new long[] { rate });
} else {
includeTunnelRates("Client", stats, rate);
includeTunnelRates("Exploratory", stats, rate);
}
//includeRate("tunnel.rejectTimeout", stats, new long[] { 10*60*1000 });
//includeRate("tunnel.rejectOverloaded", stats, new long[] { 10*60*1000 });
//includeRate("tunnel.acceptLoad", stats, new long[] { 10*60*1000 });
@ -228,6 +229,49 @@ public class StatisticsManager implements Service {
return buf.toString();
}
private static final String[] tunnelStats = { "Expire", "Reject", "Success" };
/**
* Add tunnel build rates with some mods to hide absolute quantities
* In particular, report counts normalized to 100 (i.e. a percentage)
*/
private void includeTunnelRates(String tunnelType, Properties stats, long selectedPeriod) {
long totalEvents = 0;
for (String tunnelStat : tunnelStats) {
String rateName = "tunnel.build" + tunnelType + tunnelStat;
RateStat stat = _context.statManager().getRate(rateName);
if (stat == null) continue;
Rate curRate = stat.getRate(selectedPeriod);
if (curRate == null) continue;
totalEvents += curRate.getLastEventCount();
}
if (totalEvents <= 0)
return;
for (String tunnelStat : tunnelStats) {
String rateName = "tunnel.build" + tunnelType + tunnelStat;
RateStat stat = _context.statManager().getRate(rateName);
if (stat == null) continue;
Rate curRate = stat.getRate(selectedPeriod);
if (curRate == null) continue;
double fudgeQuantity = 100.0d * curRate.getLastEventCount() / totalEvents;
stats.setProperty("stat_" + rateName + '.' + getPeriod(curRate), renderRate(curRate, fudgeQuantity));
}
}
private String renderRate(Rate rate, double fudgeQuantity) {
StringBuffer buf = new StringBuffer(128);
buf.append(num(rate.getAverageValue())).append(';');
buf.append(num(rate.getExtremeAverageValue())).append(';');
buf.append(pct(rate.getPercentageOfLifetimeValue())).append(';');
if (rate.getLifetimeTotalEventTime() > 0) {
// bah saturation
buf.append("0;0;0;0;");
}
long numPeriods = rate.getLifetimePeriods();
buf.append(num(fudgeQuantity)).append(';');
return buf.toString();
}
/* report the same data for tx and rx, for enhanced anonymity */
private void includeAverageThroughput(Properties stats) {
RateStat sendRate = _context.statManager().getRate("bw.sendRate");
@ -245,27 +289,6 @@ public class StatisticsManager implements Service {
stats.setProperty("stat_bandwidthReceiveBps.60m", str);
}
private void includeThroughput(Properties stats) {
RateStat sendRate = _context.statManager().getRate("bw.sendRate");
if (sendRate != null) {
if (_context.router().getUptime() > 60*60*1000) {
Rate r = sendRate.getRate(60*60*1000);
if (r != null)
stats.setProperty("stat_bandwidthSendBps.60m", num(r.getAverageValue()) + ';' + num(r.getExtremeAverageValue()) + ";0;0;");
}
}
RateStat recvRate = _context.statManager().getRate("bw.recvRate");
if (recvRate != null) {
if (_context.router().getUptime() > 60*60*1000) {
Rate r = recvRate.getRate(60*60*1000);
if (r != null)
stats.setProperty("stat_bandwidthReceiveBps.60m", num(r.getAverageValue()) + ';' + num(r.getExtremeAverageValue()) + ";0;0;");
}
}
}
private String getPeriod(Rate rate) { return DataHelper.formatDuration(rate.getPeriod()); }
private final String num(double num) {
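A worked example (made-up counts) of the count normalization performed by includeTunnelRates() above: event counts are published as a share of 100 rather than as absolute quantities.

    long expire = 6, reject = 34, success = 160;   // raw last-period event counts (illustrative)
    long total  = expire + reject + success;       // 200
    double pubExpire  = 100.0d * expire  / total;  //  3.0
    double pubReject  = 100.0d * reject  / total;  // 17.0
    double pubSuccess = 100.0d * success / total;  // 80.0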

View File

@ -43,7 +43,7 @@ public class ClientListenerRunner implements Runnable {
_running = false;
_listening = false;
String val = context.getProperty(BIND_ALL_INTERFACES, "False");
String val = context.getProperty(BIND_ALL_INTERFACES);
_bindAllInterfaces = Boolean.valueOf(val).booleanValue();
}

View File

@ -49,18 +49,8 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
public void startup() {
_log.info("Starting up the client subsystem");
String portStr = _context.router().getConfigSetting(PROP_CLIENT_PORT);
if (portStr != null) {
try {
int port = Integer.parseInt(portStr);
_manager = new ClientManager(_context, port);
} catch (NumberFormatException nfe) {
_log.error("Error setting the port: " + portStr + " is not valid", nfe);
_manager = new ClientManager(_context, DEFAULT_PORT);
}
} else {
_manager = new ClientManager(_context, DEFAULT_PORT);
}
int port = _context.getProperty(PROP_CLIENT_PORT, DEFAULT_PORT);
_manager = new ClientManager(_context, port);
}
public void shutdown() {

View File

@ -14,6 +14,7 @@ import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
public interface DataStore {
public boolean isInitialized();
public boolean isKnown(Hash key);
public DataStructure get(Hash key);
public DataStructure get(Hash key, boolean persist);
@ -24,6 +25,7 @@ public interface DataStore {
public Set getKeys();
public void stop();
public void restart();
public void rescan();
public int countLeaseSets();
}

View File

@ -10,12 +10,15 @@ package net.i2p.router.networkdb.kademlia;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
@ -42,6 +45,7 @@ import net.i2p.router.networkdb.DatabaseStoreMessageHandler;
import net.i2p.router.networkdb.PublishLocalRouterInfoJob;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.util.Log;
import net.i2p.util.ObjectCounter;
/**
* Kademlia based version of the network database
@ -139,6 +143,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
context.statManager().createRateStat("netDb.exploreKeySet", "how many keys are queued for exploration?", "NetworkDatabase", new long[] { 10*60*1000 });
}
@Override
public boolean isInitialized() {
return _initialized && _ds != null && _ds.isInitialized();
}
protected PeerSelector createPeerSelector() { return new PeerSelector(_context); }
public PeerSelector getPeerSelector() { return _peerSelector; }
@ -177,7 +186,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public void shutdown() {
_initialized = false;
_kb = null;
_ds.stop();
if (_ds != null)
_ds.stop();
_ds = null;
_exploreKeys.clear(); // hope this doesn't cause an explosion, it shouldn't.
// _exploreKeys = null;
@ -203,6 +213,12 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
publish(ri);
}
@Override
public void rescan() {
if (isInitialized())
_ds.rescan();
}
String getDbDir() { return _dbDir; }
public void startup() {
@ -999,8 +1015,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
out.write(buf.toString());
buf.setLength(0);
/* coreVersion to Map of routerVersion to Integer */
Map versions = new TreeMap();
ObjectCounter<String> versions = new ObjectCounter();
ObjectCounter<String> countries = new ObjectCounter();
Set routers = new TreeSet(new RouterInfoComparator());
routers.addAll(getRouters());
@ -1012,40 +1028,47 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
renderRouterInfo(buf, ri, false, full);
out.write(buf.toString());
buf.setLength(0);
String coreVersion = ri.getOption("coreVersion");
String routerVersion = ri.getOption("router.version");
if ( (coreVersion != null) && (routerVersion != null) ) {
Map routerVersions = (Map)versions.get(coreVersion);
if (routerVersions == null) {
routerVersions = new TreeMap();
versions.put(coreVersion, routerVersions);
}
Integer val = (Integer)routerVersions.get(routerVersion);
if (val == null)
routerVersions.put(routerVersion, Integer.valueOf(1));
else
routerVersions.put(routerVersion, Integer.valueOf(val.intValue() + 1));
}
if (routerVersion != null)
versions.increment(routerVersion);
String country = _context.commSystem().getCountry(key);
if(country != null)
countries.increment(country);
}
}
if (versions.size() > 0) {
buf.append("<table border=\"0\" cellspacing=\"30\"><tr><td valign=\"top\">");
List<String> versionList = new ArrayList(versions.objects());
if (versionList.size() > 0) {
Collections.sort(versionList, Collections.reverseOrder());
buf.append("<table border=\"1\">\n");
buf.append("<tr><td><b>Core version</b></td><td><b>Router version</b></td><td><b>Number</b></td></tr>\n");
for (Iterator iter = versions.entrySet().iterator(); iter.hasNext(); ) {
Map.Entry entry = (Map.Entry)iter.next();
String coreVersion = (String)entry.getKey();
Map routerVersions = (Map)entry.getValue();
for (Iterator routerIter = routerVersions.keySet().iterator(); routerIter.hasNext(); ) {
String routerVersion = (String)routerIter.next();
Integer num = (Integer)routerVersions.get(routerVersion);
buf.append("<tr><td>").append(DataHelper.stripHTML(coreVersion));
buf.append("</td><td>").append(DataHelper.stripHTML(routerVersion));
buf.append("</td><td>").append(num.intValue()).append("</td></tr>\n");
}
buf.append("<tr><th>Version</th><th>Count</th></tr>\n");
for (String routerVersion : versionList) {
int num = versions.count(routerVersion);
buf.append("<tr><td>").append(DataHelper.stripHTML(routerVersion));
buf.append("</td><td align=\"right\">").append(num).append("</td></tr>\n");
}
buf.append("</table>\n");
}
buf.append("</td><td valign=\"top\">");
out.write(buf.toString());
buf.setLength(0);
List<String> countryList = new ArrayList(countries.objects());
if (countryList.size() > 0) {
Collections.sort(countryList);
buf.append("<table border=\"1\">\n");
buf.append("<tr><th>Country</th><th>Count</th></tr>\n");
for (String country : countryList) {
int num = countries.count(country);
buf.append("<tr><td><img alt=\"").append(country.toUpperCase()).append("\"");
buf.append(" src=\"/flags.jsp?c=").append(country).append("\"> ");
buf.append(_context.commSystem().getCountryName(country));
buf.append("</td><td align=\"right\">").append(num).append("</td></tr>\n");
}
buf.append("</table>\n");
}
buf.append("</td></tr></table>");
out.write(buf.toString());
out.flush();
}

View File

@ -26,6 +26,7 @@ import net.i2p.data.RouterInfo;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.networkdb.reseed.ReseedChecker;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
@ -39,6 +40,8 @@ class PersistentDataStore extends TransientDataStore {
private String _dbDir;
private KademliaNetworkDatabaseFacade _facade;
private Writer _writer;
private ReadJob _readJob;
private boolean _initialized;
private final static int READ_DELAY = 60*1000;
@ -47,7 +50,8 @@ class PersistentDataStore extends TransientDataStore {
_log = ctx.logManager().getLog(PersistentDataStore.class);
_dbDir = dbDir;
_facade = facade;
_context.jobQueue().addJob(new ReadJob());
_readJob = new ReadJob();
_context.jobQueue().addJob(_readJob);
ctx.statManager().createRateStat("netDb.writeClobber", "How often we clobber a pending netDb write", "NetworkDatabase", new long[] { 20*60*1000 });
ctx.statManager().createRateStat("netDb.writePending", "How many pending writes are there", "NetworkDatabase", new long[] { 60*1000 });
ctx.statManager().createRateStat("netDb.writeOut", "How many we wrote", "NetworkDatabase", new long[] { 20*60*1000 });
@ -58,7 +62,10 @@ class PersistentDataStore extends TransientDataStore {
//writer.setDaemon(true);
writer.start();
}
public boolean isInitialized() { return _initialized; }
// this doesn't stop the read job or the writer, maybe it should?
@Override
public void stop() {
super.stop();
@ -71,6 +78,11 @@ class PersistentDataStore extends TransientDataStore {
_dbDir = _facade.getDbDir();
}
public void rescan() {
if (_initialized)
_readJob.wakeup();
}
@Override
public DataStructure get(Hash key) {
return get(key, true);
@ -317,6 +329,10 @@ class PersistentDataStore extends TransientDataStore {
requeue(READ_DELAY);
}
public void wakeup() {
requeue(0);
}
private void readFiles() {
int routerCount = 0;
try {
@ -336,9 +352,10 @@ class PersistentDataStore extends TransientDataStore {
_log.error("Error reading files in the db dir", ioe);
}
if ( (routerCount <= 5) && (!_alreadyWarned) ) {
_log.error("Very few routerInfo files remaining - please reseed");
if (!_alreadyWarned) {
ReseedChecker.checkReseed(_context, routerCount);
_alreadyWarned = true;
_initialized = true;
}
}
}
@ -410,7 +427,7 @@ class PersistentDataStore extends TransientDataStore {
private File getDbDir() throws IOException {
File f = new File(_dbDir);
File f = new File(_context.getRouterDir(), _dbDir);
if (!f.exists()) {
boolean created = f.mkdirs();
if (!created)

View File

@ -35,6 +35,8 @@ class TransientDataStore implements DataStore {
_log.info("Data Store initialized");
}
public boolean isInitialized() { return true; }
public void stop() {
_data.clear();
}
@ -43,6 +45,8 @@ class TransientDataStore implements DataStore {
stop();
}
public void rescan() {}
public Set getKeys() {
return new HashSet(_data.keySet());
}

View File

@ -0,0 +1,46 @@
package net.i2p.router.networkdb.reseed;
import java.io.File;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
/**
* Moved from RouterConsoleRunner.java
*
* Reseeding is not strictly a router function, it used to be
* in the routerconsole app, but this made it impossible to
* bootstrap an embedded router lacking a routerconsole,
* in iMule or android for example, without additional modifications.
*
* Also, as this is now called from PersistentDataStore, not from the
* routerconsole, we can get started as soon as the netdb has read
* the netDb/ directory, not when the console starts.
*/
public class ReseedChecker {
private static final int MINIMUM = 15;
public static void checkReseed(RouterContext context, int count) {
if (count >= MINIMUM)
return;
// we check the i2p installation directory for a flag telling us not to reseed,
// but also check the home directory for that flag too, since new users installing i2p
// don't have an installation directory that they can put the flag in yet.
File noReseedFile = new File(new File(System.getProperty("user.home")), ".i2pnoreseed");
File noReseedFileAlt1 = new File(new File(System.getProperty("user.home")), "noreseed.i2p");
File noReseedFileAlt2 = new File(context.getConfigDir(), ".i2pnoreseed");
File noReseedFileAlt3 = new File(context.getConfigDir(), "noreseed.i2p");
if (!noReseedFile.exists() && !noReseedFileAlt1.exists() && !noReseedFileAlt2.exists() && !noReseedFileAlt3.exists()) {
Log _log = context.logManager().getLog(ReseedChecker.class);
if (count <= 1)
_log.error("Downloading peer router information for a new I2P installation");
else
_log.error("Very few routerInfo files remaining - reseeding now");
Reseeder reseeder = new Reseeder(context);
reseeder.requestReseed();
}
}
}
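Any one of the four flag files checked above disables automatic reseeding; a standalone sketch (not part of this commit) creating the per-user flag:

    new File(System.getProperty("user.home"), ".i2pnoreseed").createNewFile(); // throws IOException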

View File

@ -0,0 +1,266 @@
package net.i2p.router.networkdb.reseed;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
import net.i2p.I2PAppContext;
import net.i2p.router.RouterContext;
import net.i2p.util.EepGet;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
/**
* Moved from ReseedHandler in routerconsole. See ReseedChecker for additional comments.
*
* Handler to deal with reseed requests. This will reseed from the URLs
* specified below unless the I2P configuration property "i2p.reseedURL" is
* set. It always writes to the netDb/ directory under the router dir, so don't mess with that.
*
*/
public class Reseeder {
private static ReseedRunner _reseedRunner;
private RouterContext _context;
private Log _log;
// Reject unreasonably big files, because we download into a ByteArrayOutputStream.
private static final long MAX_RESEED_RESPONSE_SIZE = 8 * 1024 * 1024;
private static final String DEFAULT_SEED_URL = "http://i2pdb.tin0.de/netDb/,http://netdb.i2p2.de/";
public Reseeder(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(Reseeder.class);
}
public void requestReseed() {
synchronized (Reseeder.class) {
if (_reseedRunner == null)
_reseedRunner = new ReseedRunner();
if (_reseedRunner.isRunning()) {
return;
} else {
System.setProperty("net.i2p.router.web.Reseeder.reseedInProgress", "true");
I2PThread reseed = new I2PThread(_reseedRunner, "Reseed");
reseed.start();
}
}
}
public class ReseedRunner implements Runnable, EepGet.StatusListener {
private boolean _isRunning;
public ReseedRunner() {
_isRunning = false;
System.setProperty("net.i2p.router.web.Reseeder.statusMessage","Reseeding.");
}
public boolean isRunning() { return _isRunning; }
public void run() {
_isRunning = true;
System.out.println("Reseed start");
reseed(false);
System.out.println("Reseed complete");
System.setProperty("net.i2p.router.web.Reseeder.reseedInProgress", "false");
_isRunning = false;
}
// EepGet status listeners
public void attemptFailed(String url, long bytesTransferred, long bytesRemaining, int currentAttempt, int numRetries, Exception cause) {
// Since readURL() runs an EepGet with 0 retries,
// we can report errors with attemptFailed() instead of transferFailed().
// It has the benefit of providing cause of failure, which helps resolve issues.
if (_log.shouldLog(Log.ERROR)) _log.error("EepGet failed on " + url, cause);
}
public void bytesTransferred(long alreadyTransferred, int currentWrite, long bytesTransferred, long bytesRemaining, String url) {}
public void transferComplete(long alreadyTransferred, long bytesTransferred, long bytesRemaining, String url, String outputFile, boolean notModified) {}
public void transferFailed(String url, long bytesTransferred, long bytesRemaining, int currentAttempt) {}
public void headerReceived(String url, int attemptNum, String key, String val) {}
public void attempting(String url) {}
// End of EepGet status listeners
/**
* Reseed has been requested, so lets go ahead and do it. Fetch all of
* the routerInfo-*.dat files from the specified URL (or the default) and
* save them into this router's netDb dir.
*
*/
private static final String RESEED_TIPS =
"Ensure that nothing blocks outbound HTTP, check <a href=logs.jsp>logs</a> " +
"and if nothing helps, read FAQ about reseeding manually.";
private void reseed(boolean echoStatus) {
List URLList = new ArrayList();
String URLs = _context.getProperty("i2p.reseedURL", DEFAULT_SEED_URL);
StringTokenizer tok = new StringTokenizer(URLs, " ,");
while (tok.hasMoreTokens())
URLList.add(tok.nextToken().trim());
Collections.shuffle(URLList);
for (int i = 0; i < URLList.size() && _isRunning; i++)
reseedOne((String) URLList.get(i), echoStatus);
}
/**
* Fetch a directory listing and then up to 200 routerInfo files in the listing.
* The listing must contain (exactly) strings that match:
* href="routerInfo-{hash}.dat">
* OR
* HREF="routerInfo-{hash}.dat">
* and then it fetches the files
* {seedURL}routerInfo-{hash}.dat
* after appending a '/' to seedURL if it doesn't have one.
* Essentially this means that the seedURL must be a directory, it
* can't end with 'index.html', for example.
*
* Jetty directory listings are not compatible, as they look like
* HREF="/full/path/to/routerInfo-...
**/
private void reseedOne(String seedURL, boolean echoStatus) {
try {
System.setProperty("net.i2p.router.web.Reseeder.errorMessage","");
System.setProperty("net.i2p.router.web.Reseeder.statusMessage","Reseeding: fetching seed URL.");
System.err.println("Reseed from " + seedURL);
URL dir = new URL(seedURL);
byte contentRaw[] = readURL(dir);
if (contentRaw == null) {
System.setProperty("net.i2p.router.web.Reseeder.errorMessage",
"Last reseed failed fully (failed reading seed URL). " +
RESEED_TIPS);
// Logging deprecated here since attemptFailed() provides better info
_log.debug("Failed reading seed URL: " + seedURL);
return;
}
String content = new String(contentRaw);
Set urls = new HashSet();
int cur = 0;
int total = 0;
while (total++ < 1000) {
int start = content.indexOf("href=\"routerInfo-", cur);
if (start < 0) {
start = content.indexOf("HREF=\"routerInfo-", cur);
if (start < 0)
break;
}
int end = content.indexOf(".dat\">", start);
String name = content.substring(start+"href=\"routerInfo-".length(), end);
urls.add(name);
cur = end + 1;
}
if (total <= 0) {
_log.error("Read " + contentRaw.length + " bytes from seed " + seedURL + ", but found no routerInfo URLs.");
System.setProperty("net.i2p.router.web.Reseeder.errorMessage",
"Last reseed failed fully (no routerInfo URLs at seed URL). " +
RESEED_TIPS);
return;
}
List urlList = new ArrayList(urls);
Collections.shuffle(urlList);
int fetched = 0;
int errors = 0;
// 200 max from one URL
for (Iterator iter = urlList.iterator(); iter.hasNext() && fetched < 200; ) {
try {
System.setProperty("net.i2p.router.web.Reseeder.statusMessage",
"Reseeding: fetching router info from seed URL (" +
fetched + " successful, " + errors + " errors, " + total + " total).");
fetchSeed(seedURL, (String)iter.next());
fetched++;
if (echoStatus) {
System.out.print(".");
if (fetched % 60 == 0)
System.out.println();
}
} catch (Exception e) {
errors++;
}
}
System.err.println("Reseed got " + fetched + " router infos from " + seedURL);
int failPercent = 100 * errors / total;
// Less than 10% of failures is considered success,
// because some routerInfos will always fail.
if ((failPercent >= 10) && (failPercent < 90)) {
System.setProperty("net.i2p.router.web.Reseeder.errorMessage",
"Last reseed failed partly (" + failPercent + "% of " + total + "). " +
RESEED_TIPS);
}
if (failPercent >= 90) {
System.setProperty("net.i2p.router.web.Reseeder.errorMessage",
"Last reseed failed (" + failPercent + "% of " + total + "). " +
RESEED_TIPS);
}
if (fetched > 0)
_context.netDb().rescan();
// Don't go on to the next URL if we have enough
if (fetched >= 100)
_isRunning = false;
} catch (Throwable t) {
System.setProperty("net.i2p.router.web.Reseeder.errorMessage",
"Last reseed failed fully (exception caught). " +
RESEED_TIPS);
_log.error("Error reseeding", t);
}
}
/* Since we don't return a value, we should always throw an exception if something fails. */
private void fetchSeed(String seedURL, String peer) throws Exception {
URL url = new URL(seedURL + (seedURL.endsWith("/") ? "" : "/") + "routerInfo-" + peer + ".dat");
byte data[] = readURL(url);
if (data == null) {
// Logging deprecated here since attemptFailed() provides better info
_log.debug("Failed fetching seed: " + url.toString());
throw new Exception ("Failed fetching seed.");
}
//System.out.println("read: " + (data != null ? data.length : -1));
writeSeed(peer, data);
}
private byte[] readURL(URL url) throws Exception {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4*1024);
// Do a non-proxied eepget into our ByteArrayOutputStream with 0 retries
EepGet get = new EepGet( I2PAppContext.getGlobalContext(), false, null, -1, 0, 0, MAX_RESEED_RESPONSE_SIZE,
null, baos, url.toString(), false, null, null);
get.addStatusListener(ReseedRunner.this);
if (get.fetch()) return baos.toByteArray(); else return null;
}
private void writeSeed(String name, byte data[]) throws Exception {
String dirName = "netDb"; // _context.getProperty("router.networkDatabase.dbDir", "netDb");
File netDbDir = new File(_context.getRouterDir(), dirName);
if (!netDbDir.exists()) {
boolean ok = netDbDir.mkdirs();
}
FileOutputStream fos = new FileOutputStream(new File(netDbDir, "routerInfo-" + name + ".dat"));
fos.write(data);
fos.close();
}
}
/******
public static void main(String args[]) {
if ( (args != null) && (args.length == 1) && (!Boolean.valueOf(args[0]).booleanValue()) ) {
System.out.println("Not reseeding, as requested");
return; // not reseeding on request
}
System.out.println("Reseeding");
Reseeder reseedHandler = new Reseeder();
reseedHandler.requestReseed();
}
******/
}

View File

@ -116,7 +116,14 @@ class PeerManager {
case PeerSelectionCriteria.PURPOSE_TEST:
// for now, the peers we test will be the reliable ones
//_organizer.selectWellIntegratedPeers(criteria.getMinimumRequired(), exclude, curVals);
_organizer.selectNotFailingPeers(criteria.getMinimumRequired(), exclude, peers);
// The PeerTestJob only runs every 5 minutes, but
// this was helping drive us to connection limits, let's leave the exploration
// to the ExploratoryPeerSelector, which will restrict to connected peers
// when we get close to the limit. So let's stick with connected peers here.
// Todo: what's the point of the PeerTestJob anyway?
//_organizer.selectNotFailingPeers(criteria.getMinimumRequired(), exclude, peers);
_organizer.selectActiveNotFailingPeers(criteria.getMinimumRequired(), exclude, peers);
break;
case PeerSelectionCriteria.PURPOSE_TUNNEL:
// pull all of the fast ones, regardless of how many we

View File

@ -316,8 +316,8 @@ public class ProfileOrganizer {
} finally { releaseReadLock(); }
if (matches.size() < howMany) {
if (_log.shouldLog(Log.INFO))
_log.info("selectHighCap("+howMany+"), not enough fast (" + matches.size() + ") going on to notFailing");
selectNotFailingPeers(howMany, exclude, matches, mask);
_log.info("selectHighCap("+howMany+"), not enough highcap (" + matches.size() + ") going on to ANFP2");
selectActiveNotFailingPeers2(howMany, exclude, matches, mask);
} else {
if (_log.shouldLog(Log.INFO))
_log.info("selectHighCap("+howMany+"), found enough highCap (" + matches.size() + ")");
@ -375,6 +375,7 @@ public class ProfileOrganizer {
selectAllNotFailingPeers(howMany, exclude, matches, onlyNotFailing, mask);
return;
}
/**
* Return a set of Hashes for peers that are both not failing and we're actively
* talking with.
@ -403,6 +404,39 @@ public class ProfileOrganizer {
}
}
/**
* Return a set of Hashes for peers that are both not failing and we're actively
* talking with.
*
* We use commSystem().isEstablished(), not profile.getIsActive(), as the
* NTCP idle time is now shorter than the 5 minute getIsActive() threshold,
* and we're using this to try and limit connections.
*
* This DOES cascade further to non-connected peers.
*/
private void selectActiveNotFailingPeers2(int howMany, Set exclude, Set matches, int mask) {
if (matches.size() < howMany) {
Map<Hash, PeerProfile> activePeers = new HashMap();
getReadLock();
try {
for (Iterator<Map.Entry<Hash, PeerProfile>> iter = _notFailingPeers.entrySet().iterator(); iter.hasNext(); ) {
Map.Entry<Hash, PeerProfile> e = iter.next();
if (_context.commSystem().isEstablished(e.getKey()))
activePeers.put(e.getKey(), e.getValue());
}
locked_selectPeers(activePeers, howMany, exclude, matches, mask);
} finally { releaseReadLock(); }
}
if (matches.size() < howMany) {
if (_log.shouldLog(Log.INFO))
_log.info("selectANFP2("+howMany+"), not enough ANFP (" + matches.size() + ") going on to notFailing");
selectNotFailingPeers(howMany, exclude, matches, mask);
} else {
if (_log.shouldLog(Log.INFO))
_log.info("selectANFP2("+howMany+"), found enough ANFP (" + matches.size() + ")");
}
}
/**
* Return a set of Hashes for peers that are not failing.
*
@ -520,8 +554,8 @@ public class ProfileOrganizer {
}
}
}
if (_log.shouldLog(Log.INFO))
_log.info("Unreachable: " + l);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Unreachable: " + l);
return l;
}
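The net effect of the selection changes above is a longer cascade, each tier filling only the shortfall left by the previous one; a sketch of the order as far as the hunks above show it:

    // selectHighCapacityPeers()
    //   -> selectActiveNotFailingPeers2()   (connected, not failing)
    //     -> selectNotFailingPeers()
    //       -> selectAllNotFailingPeers()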

View File

@ -61,13 +61,13 @@ class ProfileOrganizerRenderer {
buf.append("<p>Showing ").append(order.size()).append(" recent profiles, hiding ").append(peers.size()-order.size()).append(" older profiles</p>");
buf.append("<table border=\"1\">");
buf.append("<tr>");
buf.append("<td><b>Peer</b></td>");
buf.append("<td><b>Groups (Caps)</b></td>");
buf.append("<td><b>Speed</b></td>");
buf.append("<td><b>Capacity</b></td>");
buf.append("<td><b>Integration</b></td>");
buf.append("<td><b>Failing?</b></td>");
buf.append("<td>&nbsp;</td>");
buf.append("<th>Peer</th>");
buf.append("<th>Groups (Caps)</th>");
buf.append("<th>Speed</th>");
buf.append("<th>Capacity</th>");
buf.append("<th>Integration</th>");
buf.append("<th>Failing?</th>");
buf.append("<th>&nbsp;</th>");
buf.append("</tr>");
int prevTier = 1;
for (Iterator iter = order.iterator(); iter.hasNext();) {
@ -159,22 +159,22 @@ class ProfileOrganizerRenderer {
buf.append("<h2>Floodfill and Integrated Peers</h2>\n");
buf.append("<table border=\"1\">");
buf.append("<tr>");
buf.append("<td><b>Peer</b></td>");
buf.append("<td><b>Caps</b></td>");
buf.append("<td><b>Integ. Value</b></td>");
buf.append("<td><b>Last Heard About</b></td>");
buf.append("<td><b>Last Heard From</b></td>");
buf.append("<td><b>Last Successful Send</b></td>");
buf.append("<td><b>Last Failed Send</b></td>");
buf.append("<td><b>10m Resp. Time</b></td>");
buf.append("<td><b>1h Resp. Time</b></td>");
buf.append("<td><b>1d Resp. Time</b></td>");
buf.append("<td><b>Successful Lookups</b></td>");
buf.append("<td><b>Failed Lookups</b></td>");
buf.append("<td><b>New Stores</b></td>");
buf.append("<td><b>Old Stores</b></td>");
buf.append("<td><b>1h Fail Rate</b></td>");
buf.append("<td><b>1d Fail Rate</b></td>");
buf.append("<th>Peer</th>");
buf.append("<th>Caps</th>");
buf.append("<th>Integ. Value</th>");
buf.append("<th>Last Heard About</th>");
buf.append("<th>Last Heard From</th>");
buf.append("<th>Last Successful Send</th>");
buf.append("<th>Last Failed Send</th>");
buf.append("<th>10m Resp. Time</th>");
buf.append("<th>1h Resp. Time</th>");
buf.append("<th>1d Resp. Time</th>");
buf.append("<th>Successful Lookups</th>");
buf.append("<th>Failed Lookups</th>");
buf.append("<th>New Stores</th>");
buf.append("<th>Old Stores</th>");
buf.append("<th>1h Fail Rate</th>");
buf.append("<th>1d Fail Rate</th>");
buf.append("</tr>");
for (Iterator iter = integratedPeers.iterator(); iter.hasNext();) {
PeerProfile prof = (PeerProfile)iter.next();

View File

@ -297,18 +297,8 @@ class ProfilePersistenceHelper {
private File getProfileDir() {
if (_profileDir == null) {
String dir = null;
if (_context.router() == null) {
dir = _context.getProperty(PROP_PEER_PROFILE_DIR, DEFAULT_PEER_PROFILE_DIR);
} else {
dir = _context.router().getConfigSetting(PROP_PEER_PROFILE_DIR);
if (dir == null) {
_log.info("No peer profile dir specified [" + PROP_PEER_PROFILE_DIR
+ "], using [" + DEFAULT_PEER_PROFILE_DIR + "]");
dir = DEFAULT_PEER_PROFILE_DIR;
}
}
_profileDir = new File(dir);
String dir = _context.getProperty(PROP_PEER_PROFILE_DIR, DEFAULT_PEER_PROFILE_DIR);
_profileDir = new File(_context.getRouterDir(), dir);
}
return _profileDir;
}

View File

@ -43,6 +43,8 @@ public class ClientAppConfig {
Properties rv = new Properties();
String clientConfigFile = ctx.getProperty(PROP_CLIENT_CONFIG_FILENAME, DEFAULT_CLIENT_CONFIG_FILENAME);
File cfgFile = new File(clientConfigFile);
if (!cfgFile.isAbsolute())
cfgFile = new File(ctx.getConfigDir(), clientConfigFile);
// fall back to use router.config's clientApp.* lines
if (!cfgFile.exists())
@ -91,9 +93,12 @@ public class ClientAppConfig {
public static void writeClientAppConfig(RouterContext ctx, List apps) {
String clientConfigFile = ctx.getProperty(PROP_CLIENT_CONFIG_FILENAME, DEFAULT_CLIENT_CONFIG_FILENAME);
File cfgFile = new File(clientConfigFile);
if (!cfgFile.isAbsolute())
cfgFile = new File(ctx.getConfigDir(), clientConfigFile);
FileOutputStream fos = null;
try {
fos = new FileOutputStream(clientConfigFile);
fos = new FileOutputStream(cfgFile);
StringBuffer buf = new StringBuffer(2048);
for(int i = 0; i < apps.size(); i++) {
ClientAppConfig app = (ClientAppConfig) apps.get(i);

View File

@ -8,6 +8,7 @@ package net.i2p.router.startup;
*
*/
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.HashSet;
@ -76,16 +77,14 @@ public class CreateRouterInfoJob extends JobImpl {
info.sign(signingPrivKey);
String infoFilename = getContext().router().getConfigSetting(Router.PROP_INFO_FILENAME);
if (infoFilename == null)
infoFilename = Router.PROP_INFO_FILENAME_DEFAULT;
fos1 = new FileOutputStream(infoFilename);
String infoFilename = getContext().getProperty(Router.PROP_INFO_FILENAME, Router.PROP_INFO_FILENAME_DEFAULT);
File ifile = new File(getContext().getRouterDir(), infoFilename);
fos1 = new FileOutputStream(ifile);
info.writeBytes(fos1);
String keyFilename = getContext().router().getConfigSetting(Router.PROP_KEYS_FILENAME);
if (keyFilename == null)
keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT;
fos2 = new FileOutputStream(keyFilename);
String keyFilename = getContext().getProperty(Router.PROP_KEYS_FILENAME, Router.PROP_KEYS_FILENAME_DEFAULT);
File kfile = new File(getContext().getRouterDir(), keyFilename);
fos2 = new FileOutputStream(kfile);
privkey.writeBytes(fos2);
signingPrivKey.writeBytes(fos2);
pubkey.writeBytes(fos2);
@ -96,7 +95,7 @@ public class CreateRouterInfoJob extends JobImpl {
getContext().keyManager().setPrivateKey(privkey);
getContext().keyManager().setPublicKey(pubkey);
_log.info("Router info created and stored at " + infoFilename + " with private keys stored at " + keyFilename + " [" + info + "]");
_log.info("Router info created and stored at " + ifile.getAbsolutePath() + " with private keys stored at " + kfile.getAbsolutePath() + " [" + info + "]");
} catch (DataFormatException dfe) {
_log.error("Error building the new router information", dfe);
} catch (IOException ioe) {

View File

@ -51,21 +51,17 @@ public class LoadRouterInfoJob extends JobImpl {
}
private void loadRouterInfo() {
String routerInfoFile = getContext().router().getConfigSetting(Router.PROP_INFO_FILENAME);
if (routerInfoFile == null)
routerInfoFile = Router.PROP_INFO_FILENAME_DEFAULT;
String routerInfoFile = getContext().getProperty(Router.PROP_INFO_FILENAME, Router.PROP_INFO_FILENAME_DEFAULT);
RouterInfo info = null;
boolean failedRead = false;
String keyFilename = getContext().router().getConfigSetting(Router.PROP_KEYS_FILENAME);
if (keyFilename == null)
keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT;
String keyFilename = getContext().getProperty(Router.PROP_KEYS_FILENAME, Router.PROP_KEYS_FILENAME_DEFAULT);
File rif = new File(routerInfoFile);
File rif = new File(getContext().getRouterDir(), routerInfoFile);
if (rif.exists())
_infoExists = true;
File rkf = new File(keyFilename);
File rkf = new File(getContext().getRouterDir(), keyFilename);
if (rkf.exists())
_keysExist = true;
@ -98,14 +94,14 @@ public class LoadRouterInfoJob extends JobImpl {
_us = info;
} catch (IOException ioe) {
_log.error("Error reading the router info from " + routerInfoFile + " and the keys from " + keyFilename, ioe);
_log.error("Error reading the router info from " + rif.getAbsolutePath() + " and the keys from " + rkf.getAbsolutePath(), ioe);
_us = null;
rif.delete();
rkf.delete();
_infoExists = false;
_keysExist = false;
} catch (DataFormatException dfe) {
_log.error("Corrupt router info or keys at " + routerInfoFile + " / " + keyFilename, dfe);
_log.error("Corrupt router info or keys at " + rif.getAbsolutePath() + " / " + rkf.getAbsolutePath(), dfe);
_us = null;
rif.delete();
rkf.delete();

View File

@ -57,18 +57,11 @@ public class RebuildRouterInfoJob extends JobImpl {
public void runJob() {
_log.debug("Testing to rebuild router info");
String infoFile = getContext().router().getConfigSetting(Router.PROP_INFO_FILENAME);
if (infoFile == null) {
_log.debug("Info filename not configured, defaulting to " + Router.PROP_INFO_FILENAME_DEFAULT);
infoFile = Router.PROP_INFO_FILENAME_DEFAULT;
}
String infoFile = getContext().getProperty(Router.PROP_INFO_FILENAME, Router.PROP_INFO_FILENAME_DEFAULT);
File info = new File(getContext().getRouterDir(), infoFile);
String keyFilename = getContext().getProperty(Router.PROP_KEYS_FILENAME, Router.PROP_KEYS_FILENAME_DEFAULT);
File keyFile = new File(getContext().getRouterDir(), keyFilename);
String keyFilename = getContext().router().getConfigSetting(Router.PROP_KEYS_FILENAME);
if (keyFilename == null)
keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT;
File keyFile = new File(keyFilename);
File info = new File(infoFile);
if (!info.exists() || !keyFile.exists()) {
_log.info("Router info file [" + info.getAbsolutePath() + "] or private key file [" + keyFile.getAbsolutePath() + "] deleted, rebuilding");
rebuildRouterInfo();
@ -86,14 +79,10 @@ public class RebuildRouterInfoJob extends JobImpl {
_log.debug("Rebuilding the new router info");
boolean fullRebuild = false;
RouterInfo info = null;
String infoFilename = getContext().router().getConfigSetting(Router.PROP_INFO_FILENAME);
if (infoFilename == null)
infoFilename = Router.PROP_INFO_FILENAME_DEFAULT;
String keyFilename = getContext().router().getConfigSetting(Router.PROP_KEYS_FILENAME);
if (keyFilename == null)
keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT;
File keyFile = new File(keyFilename);
String infoFilename = getContext().getProperty(Router.PROP_INFO_FILENAME, Router.PROP_INFO_FILENAME_DEFAULT);
File infoFile = new File(getContext().getRouterDir(), infoFilename);
String keyFilename = getContext().getProperty(Router.PROP_KEYS_FILENAME, Router.PROP_KEYS_FILENAME_DEFAULT);
File keyFile = new File(getContext().getRouterDir(), keyFilename);
if (keyFile.exists()) {
// ok, no need to rebuild a brand new identity, just update what we can
@ -146,7 +135,7 @@ public class RebuildRouterInfoJob extends JobImpl {
FileOutputStream fos = null;
try {
fos = new FileOutputStream(infoFilename);
fos = new FileOutputStream(infoFile);
info.writeBytes(fos);
} catch (DataFormatException dfe) {
_log.error("Error rebuilding the router information", dfe);

View File

@ -73,6 +73,8 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
public boolean haveInboundCapacity() { return (_manager == null ? false : _manager.haveInboundCapacity()); }
@Override
public boolean haveOutboundCapacity() { return (_manager == null ? false : _manager.haveOutboundCapacity()); }
@Override
public boolean haveHighOutboundCapacity() { return (_manager == null ? false : _manager.haveHighOutboundCapacity()); }
/**
* Framed average clock skew of connected peers in seconds, or null if we cannot answer.
@ -433,6 +435,16 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
return props.getProperty("host");
}
/** full name for a country code, or the code if we don't know the name */
public String getCountryName(String c) {
if (_geoIP == null)
return c;
String n = _geoIP.fullName(c);
if (n == null)
return c;
return n;
}
/** Provide a consistent "look" for displaying router IDs in the console */
public String renderPeerHTML(Hash peer) {
String h = peer.toBase64().substring(0, 4);
@ -440,11 +452,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
String c = getCountry(peer);
if (c != null) {
buf.append("<img alt=\"").append(c.toUpperCase()).append("\" title=\"");
String n = _geoIP.fullName(c);
if (n != null)
buf.append(n);
else
buf.append(c);
buf.append(getCountryName(c));
buf.append("\" src=\"/flags.jsp?c=").append(c).append("\"> ");
}
buf.append("<tt>");

View File

@ -130,7 +130,8 @@ public class GeoIP {
*
*/
private void readCountryFile() {
File GeoFile = new File(GEOIP_DIR_DEFAULT, COUNTRY_FILE_DEFAULT);
File GeoFile = new File(_context.getBaseDir(), GEOIP_DIR_DEFAULT);
GeoFile = new File(GeoFile, COUNTRY_FILE_DEFAULT);
if (GeoFile == null || (!GeoFile.exists())) {
if (_log.shouldLog(Log.WARN))
_log.warn("Country file not found: " + GeoFile.getAbsolutePath());
@ -188,7 +189,8 @@ public class GeoIP {
*
*/
private String[] readGeoIPFile(Long[] search) {
File GeoFile = new File(GEOIP_DIR_DEFAULT, GEOIP_FILE_DEFAULT);
File GeoFile = new File(_context.getBaseDir(), GEOIP_DIR_DEFAULT);
GeoFile = new File(GeoFile, GEOIP_FILE_DEFAULT);
if (GeoFile == null || (!GeoFile.exists())) {
if (_log.shouldLog(Log.WARN))
_log.warn("GeoIP file not found: " + GeoFile.getAbsolutePath());

View File

@ -47,6 +47,7 @@ public interface Transport {
public int countActivePeers();
public int countActiveSendPeers();
public boolean haveCapacity();
public boolean haveHighCapacity();
public Vector getClockSkews();
public List getMostRecentErrorMessages();

View File

@ -559,6 +559,7 @@ public abstract class TransportImpl implements Transport {
if ((addr[0]&0xFF) >= 224) return false; // no multicast
if ((addr[0]&0xFF) == 0) return false;
if ( ((addr[0]&0xFF) == 169) && ((addr[1]&0xFF) == 254) ) return false;
if ((addr[0]&0xFF) == 5) return false; // 5/8: used by the Hamachi VPN, not routable on the public Internet
return true; // or at least possible to be true
} else if (addr.length == 16) {
return false;

View File

@ -215,6 +215,20 @@ public class TransportManager implements TransportEventListener {
return false;
}
/**
* Are all transports well below their outbound connection limits?
* Used for throttling in the router.
*/
public boolean haveHighOutboundCapacity() {
if (_transports.size() <= 0)
return false;
for (int i = 0; i < _transports.size(); i++) {
if (!((Transport)_transports.get(i)).haveHighCapacity())
return false;
}
return true;
}
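// Rough illustration of the thresholds (the limit of 100 here is invented):
// a transport with a connection limit of 100 still reports haveCapacity()
// with up to 79 active peers (4/5 of the limit, as in the NTCP code below),
// but haveHighCapacity() only with fewer than 50 (half the limit). And where
// the existing checks ask whether at least one transport has room, this one
// requires every transport to be comfortably under its limit before the
// router treats outbound capacity as "high".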
/**
* Is at least one transport below its inbound connection limit + some margin
* Use for throttling in the router.

View File

@ -31,7 +31,7 @@ import net.i2p.util.Log;
public class EventPumper implements Runnable {
private RouterContext _context;
private Log _log;
private boolean _alive;
private volatile boolean _alive;
private Selector _selector;
private final List _bufCache;
private final List _wantsRead = new ArrayList(16);
@ -64,7 +64,7 @@ public class EventPumper implements Runnable {
_expireIdleWriteTime = MAX_EXPIRE_IDLE_TIME;
}
public void startPumping() {
public synchronized void startPumping() {
if (_log.shouldLog(Log.INFO))
_log.info("Starting pumper");
// _wantsRead = new ArrayList(16);
@ -83,7 +83,7 @@ public class EventPumper implements Runnable {
}
}
public void stopPumping() {
public synchronized void stopPumping() {
_alive = false;
if (_selector != null && _selector.isOpen())
_selector.wakeup();
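Making _alive volatile and serializing startPumping()/stopPumping() is the usual shutdown idiom for a long-running worker thread: the loop must see a stop flag set from another thread, and a restart must not overlap a stop still in progress. A minimal sketch of that idiom, with illustrative names (only _alive corresponds to a real field here):

class PumperSketch implements Runnable {
    // volatile so the run() loop notices a stop() issued from another thread
    private volatile boolean _alive;
    private Thread _thread;

    public synchronized void start() {    // synchronized: no overlapping start/stop
        _alive = true;
        _thread = new Thread(this, "Pumper sketch");
        _thread.start();
    }

    public synchronized void stop() {
        _alive = false;
        if (_thread != null)
            _thread.interrupt();           // stands in for Selector.wakeup() in the real pumper
    }

    public void run() {
        while (_alive) {
            // ... select and dispatch read/write events ...
        }
    }
}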

View File

@ -326,6 +326,10 @@ public class NTCPTransport extends TransportImpl {
return countActivePeers() < getMaxConnections() * 4 / 5;
}
public boolean haveHighCapacity() {
return countActivePeers() < getMaxConnections() / 2;
}
/** queue up afterSend call, which can take some time w/ jobs, etc */
void sendComplete(OutNetMessage msg) { _finisher.add(msg); }
@ -412,7 +416,7 @@ public class NTCPTransport extends TransportImpl {
private static final int NUM_CONCURRENT_READERS = 3;
private static final int NUM_CONCURRENT_WRITERS = 3;
public RouterAddress startListening() {
public synchronized RouterAddress startListening() {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Starting ntcp transport listening");
_finisher.start();
_pumper.startPumping();
@ -424,7 +428,7 @@ public class NTCPTransport extends TransportImpl {
return bindAddress();
}
public RouterAddress restartListening(RouterAddress addr) {
public synchronized RouterAddress restartListening(RouterAddress addr) {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Restarting ntcp transport listening");
_finisher.start();
_pumper.startPumping();
@ -598,7 +602,7 @@ public class NTCPTransport extends TransportImpl {
* This doesn't (completely) block; the caller should check isAlive()
* before calling startListening() or restartListening()
*/
public void stopListening() {
public synchronized void stopListening() {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Stopping ntcp transport");
_pumper.stopPumping();
_writer.stopWriting();

View File

@ -1382,6 +1382,12 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
}
}
public boolean haveHighCapacity() {
synchronized (_peersByIdent) {
return _peersByIdent.size() < getMaxConnections() / 2;
}
}
/**
* Return our peer clock skews on this transport.
* Vector composed of Long, each element representing a peer skew in seconds.
@ -1763,48 +1769,48 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
buf.append(" timeout: ").append(DataHelper.formatDuration(_expireTimeout));
buf.append("</b><br />\n");
buf.append("<table border=\"1\">\n");
buf.append("<tr><td><b><a href=\"#def.peer\">Peer</a></b>");
buf.append("<tr><th><a href=\"#def.peer\">Peer</a>");
if (sortFlags != FLAG_ALPHA)
buf.append(" <a href=\"").append(urlBase).append("?sort=0\">V</a> ");
buf.append("</td><td><b><a href=\"#def.dir\">Dir/Intro</a></b></td><td><b><a href=\"#def.idle\">Idle</a></b>");
buf.append("</th><th><a href=\"#def.dir\">Dir/Intro</a></th><th><a href=\"#def.idle\">Idle</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by idle inbound", FLAG_IDLE_IN);
buf.append("/");
appendSortLinks(buf, urlBase, sortFlags, "Sort by idle outbound", FLAG_IDLE_OUT);
buf.append("</td>");
buf.append("<td><b><a href=\"#def.rate\">In/Out</a></b>");
buf.append("</th>");
buf.append("<th><a href=\"#def.rate\">In/Out</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by inbound rate", FLAG_RATE_IN);
buf.append("/");
appendSortLinks(buf, urlBase, sortFlags, "Sort by outbound rate", FLAG_RATE_OUT);
buf.append("</td>\n");
buf.append("<td><b><a href=\"#def.up\">Up</a></b>");
buf.append("</th>\n");
buf.append("<th><a href=\"#def.up\">Up</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by connection uptime", FLAG_UPTIME);
buf.append("</td><td><b><a href=\"#def.skew\">skew</a></b>");
buf.append("</th><th><a href=\"#def.skew\">skew</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by clock skew", FLAG_SKEW);
buf.append("</td>\n");
buf.append("<td><b><a href=\"#def.cwnd\">Cwnd</a></b>");
buf.append("</th>\n");
buf.append("<th><a href=\"#def.cwnd\">Cwnd</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by congestion window", FLAG_CWND);
buf.append("</td><td><b><a href=\"#def.ssthresh\">Ssthresh</a></b>");
buf.append("</th><th><a href=\"#def.ssthresh\">Ssthresh</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by slow start threshold", FLAG_SSTHRESH);
buf.append("</td>\n");
buf.append("<td><b><a href=\"#def.rtt\">Rtt</a></b>");
buf.append("</th>\n");
buf.append("<th><a href=\"#def.rtt\">Rtt</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by round trip time", FLAG_RTT);
buf.append("</td><td><b><a href=\"#def.dev\">Dev</a></b>");
buf.append("</th><th><a href=\"#def.dev\">Dev</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by round trip time deviation", FLAG_DEV);
buf.append("</td><td><b><a href=\"#def.rto\">Rto</a></b>");
buf.append("</th><th><a href=\"#def.rto\">Rto</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by retransmission timeout", FLAG_RTO);
buf.append("</td>\n");
buf.append("<td><b><a href=\"#def.mtu\">Mtu</a></b>");
buf.append("</th>\n");
buf.append("<th><a href=\"#def.mtu\">Mtu</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by maximum transmit unit", FLAG_MTU);
buf.append("</td><td><b><a href=\"#def.send\">Send</a></b>");
buf.append("</th><th><a href=\"#def.send\">Send</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by packets sent", FLAG_SEND);
buf.append("</td><td><b><a href=\"#def.recv\">Recv</a></b>");
buf.append("</th><th><a href=\"#def.recv\">Recv</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by packets received", FLAG_RECV);
buf.append("</td>\n");
buf.append("<td><b><a href=\"#def.resent\">Resent</a></b>");
buf.append("</th>\n");
buf.append("<th><a href=\"#def.resent\">Resent</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by packets retransmitted", FLAG_RESEND);
buf.append("</td><td><b><a href=\"#def.dupRecv\">DupRecv</a></b>");
buf.append("</th><th><a href=\"#def.dupRecv\">DupRecv</a>");
appendSortLinks(buf, urlBase, sortFlags, "Sort by packets received more than once", FLAG_DUP);
buf.append("</td>\n");
buf.append("</th>\n");
buf.append("</tr>\n");
out.write(buf.toString());
buf.setLength(0);

View File

@ -47,7 +47,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
//
if (exploreHighCap)
ctx.profileOrganizer().selectHighCapacityPeers(length, exclude, matches);
else if (ctx.commSystem().haveOutboundCapacity())
else if (ctx.commSystem().haveHighOutboundCapacity())
ctx.profileOrganizer().selectNotFailingPeers(length, exclude, matches, false);
else // use only connected peers so we don't make more connections
ctx.profileOrganizer().selectActiveNotFailingPeers(length, exclude, matches);

View File

@ -29,6 +29,7 @@ import net.i2p.router.tunnel.HopConfig;
import net.i2p.stat.RateStat;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
import net.i2p.util.ObjectCounter;
/**
*
@ -438,9 +439,9 @@ public class TunnelPoolManager implements TunnelManagerFacade {
List participating = _context.tunnelDispatcher().listParticipatingTunnels();
Collections.sort(participating, new TunnelComparator());
out.write("<h2><a name=\"participating\">Participating tunnels</a>:</h2><table border=\"1\">\n");
out.write("<tr><td><b>Receive on</b></td><td><b>From</b></td><td>"
+ "<b>Send on</b></td><td><b>To</b></td><td><b>Expiration</b></td>"
+ "<td><b>Usage</b></td><td><b>Rate</b></td><td><b>Role</b></td></tr>\n");
out.write("<tr><th>Receive on</th><th>From</th><th>"
+ "Send on</th><th>To</th><th>Expiration</th>"
+ "<th>Usage</th><th>Rate</th><th>Role</th></tr>\n");
long processed = 0;
RateStat rs = _context.statManager().getRate("tunnel.participatingMessageCount");
if (rs != null)
@ -588,20 +589,20 @@ public class TunnelPoolManager implements TunnelManagerFacade {
private void renderPeers(Writer out) throws IOException {
// count up the peers in the local pools
HashCounter lc = new HashCounter();
ObjectCounter<Hash> lc = new ObjectCounter();
int tunnelCount = countTunnelsPerPeer(lc);
// count up the peers in the participating tunnels
HashCounter pc = new HashCounter();
ObjectCounter<Hash> pc = new ObjectCounter();
int partCount = countParticipatingPerPeer(pc);
Set<Hash> peers = new HashSet(lc.hashes());
peers.addAll(pc.hashes());
Set<Hash> peers = new HashSet(lc.objects());
peers.addAll(pc.objects());
List<Hash> peerList = new ArrayList(peers);
Collections.sort(peerList, new HashComparator());
out.write("<h2><a name=\"peers\">Tunnel Counts By Peer</a>:</h2>\n");
out.write("<table border=\"1\"><tr><td><b>Peer</b></td><td><b>Expl. + Client</b></td><td><b>% of total</b></td><td><b>Part. from + to</b></td><td><b>% of total</b></td></tr>\n");
out.write("<table border=\"1\"><tr><th>Peer</th><th>Expl. + Client</th><th>% of total</th><th>Part. from + to</th><th>% of total</th></tr>\n");
for (Hash h : peerList) {
out.write("<tr><td align=\"right\">");
out.write(netDbLink(h));
@ -625,7 +626,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
/** @return total number of non-fallback expl. + client tunnels */
private int countTunnelsPerPeer(HashCounter lc) {
private int countTunnelsPerPeer(ObjectCounter<Hash> lc) {
List<TunnelPool> pools = new ArrayList();
listPools(pools);
int tunnelCount = 0;
@ -661,12 +662,12 @@ public class TunnelPoolManager implements TunnelManagerFacade {
* @return Set of peers that should not be allowed in another tunnel
*/
public Set<Hash> selectPeersInTooManyTunnels() {
HashCounter lc = new HashCounter();
ObjectCounter<Hash> lc = new ObjectCounter();
int tunnelCount = countTunnelsPerPeer(lc);
Set<Hash> rv = new HashSet();
if (tunnelCount >= 4 && _context.router().getUptime() > 10*60*1000) {
int max = _context.getProperty("router.maxTunnelPercentage", DEFAULT_MAX_PCT_TUNNELS);
for (Hash h : lc.hashes()) {
for (Hash h : lc.objects()) {
if (lc.count(h) > 0 && (lc.count(h) + 1) * 100 / (tunnelCount + 1) > max)
rv.add(h);
}
@ -675,7 +676,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
/** @return total number of part. tunnels */
private int countParticipatingPerPeer(HashCounter pc) {
private int countParticipatingPerPeer(ObjectCounter<Hash> pc) {
List<HopConfig> participating = _context.tunnelDispatcher().listParticipatingTunnels();
for (HopConfig cfg : participating) {
Hash from = cfg.getReceiveFrom();
@ -694,27 +695,6 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
}
private static class HashCounter {
private ConcurrentHashMap<Hash, Integer> _map;
public HashCounter() {
_map = new ConcurrentHashMap();
}
public void increment(Hash h) {
Integer i = _map.putIfAbsent(h, Integer.valueOf(1));
if (i != null)
_map.put(h, Integer.valueOf(i.intValue() + 1));
}
public int count(Hash h) {
Integer i = _map.get(h);
if (i != null)
return i.intValue();
return 0;
}
public Set<Hash> hashes() {
return _map.keySet();
}
}
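The HashCounter helper above is dropped in favor of the shared net.i2p.util.ObjectCounter. Judging only from the removed class and the count()/objects() call sites in the hunks above, a minimal generic equivalent is the same code with Hash replaced by a type parameter; the real ObjectCounter may differ in detail:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/** Sketch only: a generic stand-in for the removed HashCounter. */
public class ObjectCounterSketch<K> {
    private final ConcurrentHashMap<K, Integer> _map = new ConcurrentHashMap<K, Integer>();

    /** add one to the count for this key */
    public void increment(K key) {
        Integer i = _map.putIfAbsent(key, Integer.valueOf(1));
        if (i != null)
            _map.put(key, Integer.valueOf(i.intValue() + 1)); // same non-atomic update as the old HashCounter
    }

    /** @return current count for this key, or zero if never incremented */
    public int count(K key) {
        Integer i = _map.get(key);
        return i != null ? i.intValue() : 0;
    }

    /** @return all keys counted so far */
    public Set<K> objects() {
        return _map.keySet();
    }
}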
private String getCapacity(Hash peer) {
RouterInfo info = _context.netDb().lookupRouterInfoLocally(peer);
if (info != null) {

View File

@ -276,7 +276,14 @@ public class ControlPoint implements HTTPRequestListener
{
int nRoots = devNodeList.size();
for (int n=0; n<nRoots; n++) {
Node rootNode = devNodeList.getNode(n);
// An ArrayIndexOutOfBoundsException was thrown from here; it might be better
// to copy the list before traversal?
Node rootNode;
try {
rootNode = devNodeList.getNode(n);
} catch (ArrayIndexOutOfBoundsException aioob) {
break;
}
Device dev = getDevice(rootNode);
if (dev == null)
continue;
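A sketch of the alternative the comment above suggests — copying the list before traversal — using only the NodeList.size()/getNode() calls already present (illustration, not a drop-in fix: the snapshot itself still has to be taken under the same guard, or under a lock shared with whatever mutates devNodeList):

List<Node> roots = new ArrayList<Node>();   // assumes java.util.List/ArrayList are imported
try {
    int nRoots = devNodeList.size();
    for (int n = 0; n < nRoots; n++)
        roots.add(devNodeList.getNode(n));
} catch (ArrayIndexOutOfBoundsException aioob) {
    // the list shrank while we were copying; proceed with what we have
}
for (Node rootNode : roots) {
    Device dev = getDevice(rootNode);
    if (dev == null)
        continue;
    // ... handle the device as in the loop above ...
}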