forked from I2P_Developers/i2p.i2p
merge of '74266b0afe4ef1abef923c8389fb47263b9a39e1'
and '9a62d1aa11b74d835ec795c0a303bf5c2ebc2793'
@@ -463,14 +463,15 @@ public class Router {
             ri.addCapability(CAPABILITY_BW256);
         }

-        if (FloodfillNetworkDatabaseFacade.floodfillEnabled(_context))
+        // if prop set to true, don't tell people we are ff even if we are
+        if (FloodfillNetworkDatabaseFacade.floodfillEnabled(_context) &&
+            !Boolean.valueOf(_context.getProperty("router.hideFloodfillParticipant")).booleanValue())
             ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);

-        if("true".equalsIgnoreCase(_context.getProperty(Router.PROP_HIDDEN, "false")))
+        if(Boolean.valueOf(_context.getProperty(PROP_HIDDEN)).booleanValue())
             ri.addCapability(RouterInfo.CAPABILITY_HIDDEN);

-        String forceUnreachable = _context.getProperty(PROP_FORCE_UNREACHABLE);
-        if ( (forceUnreachable != null) && ("true".equalsIgnoreCase(forceUnreachable)) ) {
+        if (Boolean.valueOf(_context.getProperty(PROP_FORCE_UNREACHABLE)).booleanValue()) {
             ri.addCapability(CAPABILITY_UNREACHABLE);
             return;
         }
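
Note: the three rewrites above all lean on the same fact: Boolean.valueOf(String) is null-safe and returns true only for a case-insensitive "true", so the explicit null guard and "true".equalsIgnoreCase(...) become redundant. A minimal stand-alone demonstration (not part of the commit):

    public class BooleanPropDemo {
        public static void main(String[] args) {
            // Boolean.valueOf(null) and Boolean.valueOf("anything else") are both false
            System.out.println(Boolean.valueOf((String) null).booleanValue()); // false
            System.out.println(Boolean.valueOf("TRUE").booleanValue());        // true
            System.out.println(Boolean.valueOf("false").booleanValue());       // false
            System.out.println(Boolean.valueOf("garbage").booleanValue());     // false
        }
    }
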
@@ -582,7 +583,13 @@ public class Router {
         //_context.inNetMessagePool().registerHandlerJobBuilder(TunnelMessage.MESSAGE_TYPE, new TunnelMessageHandler(_context));
     }

+    /**
+     * this is for oldconsole.jsp, pretty much unused except as a way to get memory info,
+     * so let's comment out the rest, it is available elsewhere, and we don't really
+     * want to spend a minute rendering a multi-megabyte page in memory.
+     */
     public void renderStatusHTML(Writer out) throws IOException {
+        /****************
         out.write("<h1>Router console</h1>\n" +
                   "<i><a href=\"/oldconsole.jsp\">console</a> | <a href=\"/oldstats.jsp\">stats</a></i><br>\n" +
                   "<form action=\"/oldconsole.jsp\">" +
@@ -599,21 +606,25 @@ public class Router {
                   "<option value=\"/oldconsole.jsp#logs\">Log messages</option>\n" +
                   "</select> <input type=\"submit\" value=\"GO\" /> </form>" +
                   "<hr>\n");
+        **************/

-        StringBuilder buf = new StringBuilder(32*1024);
+        StringBuilder buf = new StringBuilder(4*1024);

+        // Please don't change the text or formatting, tino matches it in his scripts
         if ( (_routerInfo != null) && (_routerInfo.getIdentity() != null) )
             buf.append("<b>Router: </b> ").append(_routerInfo.getIdentity().getHash().toBase64()).append("<br>\n");
-        buf.append("<b>As of: </b> ").append(new Date(_context.clock().now())).append(" (uptime: ").append(DataHelper.formatDuration(getUptime())).append(") <br>\n");
+        buf.append("<b>As of: </b> ").append(new Date(_context.clock().now())).append("<br>\n");
+        buf.append("<b>RouterUptime: </b> " ).append(DataHelper.formatDuration(getUptime())).append(" <br>\n");
         buf.append("<b>Started on: </b> ").append(new Date(getWhenStarted())).append("<br>\n");
         buf.append("<b>Clock offset: </b> ").append(_context.clock().getOffset()).append("ms (OS time: ").append(new Date(_context.clock().now() - _context.clock().getOffset())).append(")<br>\n");
+        buf.append("<b>RouterVersion:</b> ").append(RouterVersion.FULL_VERSION).append(" / SDK: ").append(CoreVersion.VERSION).append("<br>\n");
         long tot = Runtime.getRuntime().totalMemory()/1024;
         long free = Runtime.getRuntime().freeMemory()/1024;
         buf.append("<b>Memory:</b> In use: ").append((tot-free)).append("KB Free: ").append(free).append("KB <br>\n");
-        buf.append("<b>Version:</b> Router: ").append(RouterVersion.VERSION).append(" / SDK: ").append(CoreVersion.VERSION).append("<br>\n");
         if (_higherVersionSeen)
             buf.append("<b><font color=\"red\">HIGHER VERSION SEEN</font><b> - please <a href=\"http://www.i2p.net/\">check</a> to see if there is a new release out<br>\n");

+        /*********
         buf.append("<hr><a name=\"bandwidth\"> </a><h2>Bandwidth</h2>\n");
         long sent = _context.bandwidthLimiter().getTotalAllocatedOutboundBytes();
         long received = _context.bandwidthLimiter().getTotalAllocatedInboundBytes();
@@ -768,6 +779,7 @@ public class Router {
             buf.append("</pre></td></tr>\n");
         }
         buf.append("</table>\n");
+        ***********/
         out.write(buf.toString());
         out.flush();
     }
@@ -122,8 +122,7 @@ public class StatisticsManager implements Service {
         //includeRate("jobQueue.jobRunSlow", stats, new long[] { 10*60*1000l, 60*60*1000l });
         //includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*60*1000 });
         // total event count can be used to track uptime
-        boolean hideTotals = ! RouterVersion.VERSION.equals("0.7.6");
-        includeRate("tunnel.participatingTunnels", stats, new long[] { 60*60*1000 }, hideTotals);
+        includeRate("tunnel.participatingTunnels", stats, new long[] { 60*60*1000 }, true);
         //includeRate("tunnel.testSuccessTime", stats, new long[] { 10*60*1000l });
         //includeRate("client.sendAckTime", stats, new long[] { 60*60*1000 }, true);
         //includeRate("udp.sendConfirmTime", stats, new long[] { 10*60*1000 });
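
Note: the removed `hideTotals` flag published lifetime event totals only for the 0.7.6 release; the replacement passes `true` unconditionally because, per the comment above, a lifetime total divided by the published rate gives a rough uptime estimate. Illustrative arithmetic only, with made-up numbers:

    long eventsPerHour = 120;   // published hourly rate
    long totalEvents = 5400;    // lifetime total, if it were published
    double estUptimeHours = totalEvents / (double) eventsPerHour; // ~45 hours of uptime leaked
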
@@ -50,7 +50,7 @@ class FloodfillMonitorJob extends JobImpl {
         // there's a lot of eligible non-floodfills, keep them from all jumping in at once
         // To do: somehow assess the size of the network to make this adaptive?
         if (!ff)
-            delay *= 3;
+            delay *= 7;
         requeue(delay);
     }

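
Note: the larger multiplier stretches the re-check interval for routers that are not yet floodfill, so eligible candidates trickle into the floodfill pool instead of enrolling all at once. A sketch of the effect; the base-delay computation here is an assumption, not taken from this file:

    long delay = 60*1000 + ctx.random().nextInt(5*60*1000); // hypothetical randomized base
    if (!ff)
        delay *= 7;   // non-floodfills now recheck after up to ~42 minutes instead of ~18
    requeue(delay);
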
@@ -94,9 +94,22 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
         }
     }

+    private static final int MAX_TO_FLOOD = 9;
+
+    /**
+     * Send to a subset of all floodfill peers.
+     * We do this to implement Kademlia within the floodfills, i.e.
+     * we flood to those closest to the key.
+     */
     public void flood(DataStructure ds) {
+        Hash key;
+        if (ds instanceof LeaseSet)
+            key = ((LeaseSet)ds).getDestination().calculateHash();
+        else
+            key = ((RouterInfo)ds).getIdentity().calculateHash();
+        Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
         FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector();
-        List peers = sel.selectFloodfillParticipants(getKBuckets());
+        List peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
         int flooded = 0;
         for (int i = 0; i < peers.size(); i++) {
             Hash peer = (Hash)peers.get(i);
@@ -107,12 +120,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
                 continue;
             DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
             if (ds instanceof LeaseSet) {
-                msg.setKey(((LeaseSet)ds).getDestination().calculateHash());
                 msg.setLeaseSet((LeaseSet)ds);
             } else {
-                msg.setKey(((RouterInfo)ds).getIdentity().calculateHash());
                 msg.setRouterInfo((RouterInfo)ds);
             }
+            msg.setKey(key);
             msg.setReplyGateway(null);
             msg.setReplyToken(0);
             msg.setReplyTunnel(null);
@@ -125,11 +137,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
             _context.commSystem().processMessage(m);
             flooded++;
             if (_log.shouldLog(Log.INFO))
-                _log.info("Flooding the entry for " + msg.getKey().toBase64() + " to " + peer.toBase64());
+                _log.info("Flooding the entry for " + key.toBase64() + " to " + peer.toBase64());
         }

         if (_log.shouldLog(Log.INFO))
-            _log.info("Flooded the to " + flooded + " peers");
+            _log.info("Flooded the data to " + flooded + " of " + peers.size() + " peers");
     }

     private static final int FLOOD_PRIORITY = 200;
@@ -32,16 +32,16 @@ class FloodfillPeerSelector extends PeerSelector {
      * @return List of Hash for the peers selected
      */
     @Override
-    public List selectMostReliablePeers(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {
+    public List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
         return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, true);
     }

     @Override
-    public List selectNearestExplicitThin(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {
+    public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
         return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, false);
     }

-    public List selectNearestExplicitThin(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets, boolean preferConnected) {
+    public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets, boolean preferConnected) {
         if (peersToIgnore == null)
             peersToIgnore = new HashSet(1);
         peersToIgnore.add(_context.routerHash());
@@ -56,30 +56,55 @@ class FloodfillPeerSelector extends PeerSelector {
         return rv;
     }

-    /** Returned list will not include our own hash */
-    public List selectFloodfillParticipants(KBucketSet kbuckets) {
+    /**
+     * @return all floodfills not shitlisted forever. list will not include our own hash
+     *
+     */
+    public List<Hash> selectFloodfillParticipants(KBucketSet kbuckets) {
         if (kbuckets == null) return new ArrayList();
         FloodfillSelectionCollector matches = new FloodfillSelectionCollector(null, null, 0);
         kbuckets.getAll(matches);
         return matches.getFloodfillParticipants();
     }
+
+    /**
+     * @return all floodfills not shitlisted forever
+     * @param maxNumRouters max to return
+     * Sorted by closest to the key if > maxNumRouters, otherwise not
+     */
+    public List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet kbuckets) {
+        List<Hash> ffs = selectFloodfillParticipants(kbuckets);
+        if (ffs.size() <= maxNumRouters)
+            return ffs; // unsorted
+        TreeMap<BigInteger, Hash> sorted = new TreeMap();
+        for (int i = 0; i < ffs.size(); i++) {
+            Hash h = ffs.get(i);
+            BigInteger diff = getDistance(key, h);
+            sorted.put(diff, h);
+        }
+        List<Hash> rv = new ArrayList(maxNumRouters);
+        for (int i = 0; i < maxNumRouters; i++) {
+            rv.add(sorted.remove(sorted.firstKey()));
+        }
+        return rv;
+    }

     private class FloodfillSelectionCollector implements SelectionCollector {
-        private TreeMap _sorted;
-        private List _floodfillMatches;
+        private TreeMap<BigInteger, Hash> _sorted;
+        private List<Hash> _floodfillMatches;
         private Hash _key;
-        private Set _toIgnore;
+        private Set<Hash> _toIgnore;
         private int _matches;
         private int _wanted;
-        public FloodfillSelectionCollector(Hash key, Set toIgnore, int wanted) {
+        public FloodfillSelectionCollector(Hash key, Set<Hash> toIgnore, int wanted) {
             _key = key;
             _sorted = new TreeMap();
-            _floodfillMatches = new ArrayList(1);
+            _floodfillMatches = new ArrayList(8);
             _toIgnore = toIgnore;
             _matches = 0;
             _wanted = wanted;
         }
-        public List getFloodfillParticipants() { return _floodfillMatches; }
+        public List<Hash> getFloodfillParticipants() { return _floodfillMatches; }
         private static final int EXTRA_MATCHES = 100;
         public void add(Hash entry) {
             //if (_context.profileOrganizer().isFailing(entry))
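
Note: the new selectFloodfillParticipants(key, max, kbuckets) overload is what makes flood() Kademlia-like: floodfills are ranked by XOR distance between the routing key and their hash, and only the MAX_TO_FLOOD closest receive the store. A self-contained sketch of the same idea; the class and helper names are illustrative, not the project's API:

    import java.math.BigInteger;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.TreeMap;

    class ClosestSelector {
        /** XOR distance between two equal-length hashes, read as an unsigned integer. */
        static BigInteger distance(byte[] key, byte[] peer) {
            byte[] x = new byte[key.length];
            for (int i = 0; i < x.length; i++)
                x[i] = (byte) (key[i] ^ peer[i]);
            return new BigInteger(1, x);
        }

        /** The 'max' peers closest to 'key', mirroring the TreeMap approach above. */
        static List<byte[]> closest(byte[] key, List<byte[]> peers, int max) {
            TreeMap<BigInteger, byte[]> sorted = new TreeMap<BigInteger, byte[]>();
            for (byte[] p : peers)
                sorted.put(distance(key, p), p);  // keys collide only if two hashes are identical
            List<byte[]> rv = new ArrayList<byte[]>(max);
            for (int i = 0; i < max && !sorted.isEmpty(); i++)
                rv.add(sorted.remove(sorted.firstKey()));
            return rv;
        }
    }
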
@@ -115,15 +140,15 @@ class FloodfillPeerSelector extends PeerSelector {
             _matches++;
         }
         /** get the first $howMany entries matching */
-        public List get(int howMany) {
+        public List<Hash> get(int howMany) {
             return get(howMany, false);
         }

-        public List get(int howMany, boolean preferConnected) {
+        public List<Hash> get(int howMany, boolean preferConnected) {
             Collections.shuffle(_floodfillMatches, _context.random());
-            List rv = new ArrayList(howMany);
-            List badff = new ArrayList(howMany);
-            List unconnectedff = new ArrayList(howMany);
+            List<Hash> rv = new ArrayList(howMany);
+            List<Hash> badff = new ArrayList(howMany);
+            List<Hash> unconnectedff = new ArrayList(howMany);
             int found = 0;
             long now = _context.clock().now();
             // Only add in "good" floodfills here...
@@ -17,10 +17,14 @@ import net.i2p.data.RouterInfo;
 import net.i2p.router.Job;
 import net.i2p.router.RouterContext;

+/**
+ * This extends StoreJob to fire off a FloodfillVerifyStoreJob after success.
+ *
+ */
 class FloodfillStoreJob extends StoreJob {
     private FloodfillNetworkDatabaseFacade _facade;
     /**
-     * Create a new search for the routingKey specified
+     * Send a data structure to the floodfills
      *
      */
     public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) {
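
Note: per the new class javadoc, the subclass's real job is to chain a verification step onto a successful store. The hunk does not show that hook; a hypothetical shape for it follows. The method name, constructor signature, and fields here are guesses, only the FloodfillVerifyStoreJob name comes from the javadoc above:

    @Override
    protected void succeed() {
        super.succeed();
        // hypothetical: queue a later re-fetch of the entry from a different
        // floodfill to confirm the store actually propagated
        getContext().jobQueue().addJob(
            new FloodfillVerifyStoreJob(getContext(), _state.getTarget(), _facade));
    }
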
@@ -31,7 +35,7 @@ class FloodfillStoreJob extends StoreJob {
      * @param toSkip set of peer hashes of people we dont want to send the data to (e.g. we
      * already know they have it). This can be null.
      */
-    public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set toSkip) {
+    public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
         super(context, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip);
         _facade = facade;
     }
@@ -43,10 +43,9 @@ public class PeerSelector {
      * @return ordered list of Hash objects
      */
     /* FIXME Exporting non-public type through public API FIXME */
-    public List selectMostReliablePeers(Hash key, int numClosest, Set alreadyChecked, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API
+    public List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API
         // get the peers closest to the key
-        List nearest = selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets);
-        return nearest;
+        return selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets);
     }

     /**
@@ -57,10 +56,11 @@ public class PeerSelector {
      * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
      */
     /* FIXME Exporting non-public type through public API FIXME */
-    public List selectNearestExplicit(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API
-        if (true)
+    public List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API
+        //if (true)
             return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets);

+        /******
         if (peersToIgnore == null)
             peersToIgnore = new HashSet(1);
         peersToIgnore.add(_context.routerHash());
@@ -84,6 +84,7 @@ public class PeerSelector {
                       + peerHashes + " (not including " + peersToIgnore + ") [allHashes.size = "
                       + allHashes.size() + "]");
         return peerHashes;
+        ******/
     }

     /**
@@ -94,7 +95,7 @@ public class PeerSelector {
      * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
      */
     /* FIXME Exporting non-public type through public API FIXME */
-    public List selectNearestExplicitThin(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API
+    public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API
         if (peersToIgnore == null)
             peersToIgnore = new HashSet(1);
         peersToIgnore.add(_context.routerHash());
@@ -109,11 +110,11 @@ public class PeerSelector {
     }

     private class MatchSelectionCollector implements SelectionCollector {
-        private TreeMap _sorted;
+        private TreeMap<BigInteger, Hash> _sorted;
         private Hash _key;
-        private Set _toIgnore;
+        private Set<Hash> _toIgnore;
         private int _matches;
-        public MatchSelectionCollector(Hash key, Set toIgnore) {
+        public MatchSelectionCollector(Hash key, Set<Hash> toIgnore) {
             _key = key;
             _sorted = new TreeMap();
             _toIgnore = toIgnore;
@@ -135,8 +136,8 @@ public class PeerSelector {
             _matches++;
         }
         /** get the first $howMany entries matching */
-        public List get(int howMany) {
-            List rv = new ArrayList(howMany);
+        public List<Hash> get(int howMany) {
+            List<Hash> rv = new ArrayList(howMany);
             for (int i = 0; i < howMany; i++) {
                 if (_sorted.size() <= 0)
                     break;
@@ -151,6 +152,7 @@ public class PeerSelector {
      * strip out all of the peers that are failing
      *
      */
+    /********
     private void removeFailingPeers(Set peerHashes) {
         List failing = null;
         for (Iterator iter = peerHashes.iterator(); iter.hasNext(); ) {
@@ -184,6 +186,7 @@ public class PeerSelector {
         if (failing != null)
             peerHashes.removeAll(failing);
     }
+    **********/

     public static BigInteger getDistance(Hash targetKey, Hash routerInQuestion) {
         // plain XOR of the key and router
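
Note: the hunk cuts off right after the "plain XOR" comment. For reference, a plausible body consistent with that comment and with the BigInteger keys used by the collectors above. DataHelper.xor is a real net.i2p.data helper, but treat the exact body as an illustration rather than the verbatim source:

    public static BigInteger getDistance(Hash targetKey, Hash routerInQuestion) {
        // XOR the two 32-byte hashes and read the result as a non-negative
        // integer: smaller value = closer in Kademlia keyspace
        byte[] diff = DataHelper.xor(routerInQuestion.getData(), targetKey.getData());
        return new BigInteger(1, diff);
    }
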
@@ -199,7 +202,7 @@ public class PeerSelector {
      * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
      */
     /* FIXME Exporting non-public type through public API FIXME */
-    public List selectNearest(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API
+    public List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API
         // sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia
         // would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm.
         // later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance()
@@ -56,7 +56,7 @@ class StoreJob extends JobImpl {
     private final static int STORE_PRIORITY = 100;

     /**
-     * Create a new search for the routingKey specified
+     * Send a data structure to the floodfills
      *
      */
     public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key,
@@ -69,7 +69,7 @@ class StoreJob extends JobImpl {
      * already know they have it). This can be null.
      */
     public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key,
-                    DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set toSkip) {
+                    DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
         super(context);
         _log = context.logManager().getLog(StoreJob.class);
         getContext().statManager().createRateStat("netDb.storeRouterInfoSent", "How many routerInfo store messages have we sent?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
@@ -146,7 +146,7 @@ class StoreJob extends JobImpl {
         // This will help minimize active connections for floodfill peers and allow
         // the network to scale.
         // Perhaps the ultimate solution is to send RouterInfos through a lease also.
-        List closestHashes;
+        List<Hash> closestHashes;
         if (_state.getData() instanceof RouterInfo)
             closestHashes = getMostReliableRouters(_state.getTarget(), toCheck, _state.getAttempted());
         else
@@ -165,8 +165,8 @@ class StoreJob extends JobImpl {
             //_state.addPending(closestHashes);
             if (_log.shouldLog(Log.INFO))
                 _log.info(getJobId() + ": Continue sending key " + _state.getTarget() + " after " + _state.getAttempted().size() + " tries to " + closestHashes);
-            for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) {
-                Hash peer = (Hash)iter.next();
+            for (Iterator<Hash> iter = closestHashes.iterator(); iter.hasNext(); ) {
+                Hash peer = iter.next();
                 DataStructure ds = _facade.getDataStore().get(peer);
                 if ( (ds == null) || !(ds instanceof RouterInfo) ) {
                     if (_log.shouldLog(Log.INFO))
|
||||
*
|
||||
* @return ordered list of Hash objects
|
||||
*/
|
||||
private List getClosestRouters(Hash key, int numClosest, Set alreadyChecked) {
|
||||
private List<Hash> getClosestRouters(Hash key, int numClosest, Set<Hash> alreadyChecked) {
|
||||
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
|
||||
//if (_log.shouldLog(Log.DEBUG))
|
||||
// _log.debug(getJobId() + ": Current routing key for " + key + ": " + rkey);
|
||||
@@ -225,7 +225,7 @@ class StoreJob extends JobImpl {
         return _peerSelector.selectNearestExplicit(rkey, numClosest, alreadyChecked, ks);
     }

-    private List getMostReliableRouters(Hash key, int numClosest, Set alreadyChecked) {
+    private List<Hash> getMostReliableRouters(Hash key, int numClosest, Set<Hash> alreadyChecked) {
         Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
         KBucketSet ks = _facade.getKBuckets();
         if (ks == null) return new ArrayList();
@@ -15,12 +15,12 @@ class StoreState {
     private RouterContext _context;
     private Hash _key;
     private DataStructure _data;
-    private final HashSet _pendingPeers;
-    private HashMap _pendingPeerTimes;
-    private final HashSet _successfulPeers;
-    private final HashSet _successfulExploratoryPeers;
-    private final HashSet _failedPeers;
-    private final HashSet _attemptedPeers;
+    private final HashSet<Hash> _pendingPeers;
+    private HashMap<Hash, Long> _pendingPeerTimes;
+    private final HashSet<Hash> _successfulPeers;
+    private final HashSet<Hash> _successfulExploratoryPeers;
+    private final HashSet<Hash> _failedPeers;
+    private final HashSet<Hash> _attemptedPeers;
     private int _completeCount;
     private volatile long _completed;
     private volatile long _started;
@@ -28,7 +28,7 @@ class StoreState {
     public StoreState(RouterContext ctx, Hash key, DataStructure data) {
         this(ctx, key, data, null);
     }
-    public StoreState(RouterContext ctx, Hash key, DataStructure data, Set toSkip) {
+    public StoreState(RouterContext ctx, Hash key, DataStructure data, Set<Hash> toSkip) {
         _context = ctx;
         _key = key;
         _data = data;
@@ -48,29 +48,29 @@ class StoreState {

     public Hash getTarget() { return _key; }
     public DataStructure getData() { return _data; }
-    public Set getPending() {
+    public Set<Hash> getPending() {
         synchronized (_pendingPeers) {
-            return (Set)_pendingPeers.clone();
+            return (Set<Hash>)_pendingPeers.clone();
         }
     }
-    public Set getAttempted() {
+    public Set<Hash> getAttempted() {
         synchronized (_attemptedPeers) {
-            return (Set)_attemptedPeers.clone();
+            return (Set<Hash>)_attemptedPeers.clone();
         }
     }
-    public Set getSuccessful() {
+    public Set<Hash> getSuccessful() {
         synchronized (_successfulPeers) {
-            return (Set)_successfulPeers.clone();
+            return (Set<Hash>)_successfulPeers.clone();
         }
     }
-    public Set getSuccessfulExploratory() {
+    public Set<Hash> getSuccessfulExploratory() {
         synchronized (_successfulExploratoryPeers) {
-            return (Set)_successfulExploratoryPeers.clone();
+            return (Set<Hash>)_successfulExploratoryPeers.clone();
         }
     }
-    public Set getFailed() {
+    public Set<Hash> getFailed() {
         synchronized (_failedPeers) {
-            return (Set)_failedPeers.clone();
+            return (Set<Hash>)_failedPeers.clone();
         }
     }
     public boolean completed() { return _completed != -1; }
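
Note: all five getters use the same snapshot idiom: clone the backing HashSet while holding its monitor, so callers can iterate without racing writers; the (Set<Hash>) casts are unchecked but safe because clone() preserves the element type. A minimal self-contained version of the pattern (hypothetical class, not project code):

    import java.util.HashSet;
    import java.util.Set;

    class SnapshotHolder<T> {
        private final HashSet<T> _items = new HashSet<T>();

        public void add(T item) {
            synchronized (_items) { _items.add(item); }
        }

        @SuppressWarnings("unchecked")
        public Set<T> snapshot() {
            synchronized (_items) {
                // shallow copy taken under the lock; callers iterate the copy freely
                return (Set<T>) _items.clone();
            }
        }
    }
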
@@ -92,10 +92,10 @@ class StoreState {
             _attemptedPeers.add(peer);
         }
     }
-    public void addPending(Collection pending) {
+    public void addPending(Collection<Hash> pending) {
         synchronized (_pendingPeers) {
             _pendingPeers.addAll(pending);
-            for (Iterator iter = pending.iterator(); iter.hasNext(); )
+            for (Iterator<Hash> iter = pending.iterator(); iter.hasNext(); )
                 _pendingPeerTimes.put(iter.next(), new Long(_context.clock().now()));
         }
         synchronized (_attemptedPeers) {
@@ -113,7 +113,7 @@ class StoreState {
         long rv = -1;
         synchronized (_pendingPeers) {
             _pendingPeers.remove(peer);
-            Long when = (Long)_pendingPeerTimes.remove(peer);
+            Long when = _pendingPeerTimes.remove(peer);
             if (when != null)
                 rv = _context.clock().now() - when.longValue();
         }
@@ -128,7 +128,7 @@ class StoreState {
         long rv = -1;
         synchronized (_pendingPeers) {
             _pendingPeers.remove(peer);
-            Long when = (Long)_pendingPeerTimes.remove(peer);
+            Long when = _pendingPeerTimes.remove(peer);
             if (when != null)
                 rv = _context.clock().now() - when.longValue();
         }
@@ -159,43 +159,43 @@ class StoreState {
         buf.append(" Attempted: ");
         synchronized (_attemptedPeers) {
             buf.append(_attemptedPeers.size()).append(' ');
-            for (Iterator iter = _attemptedPeers.iterator(); iter.hasNext(); ) {
-                Hash peer = (Hash)iter.next();
+            for (Iterator<Hash> iter = _attemptedPeers.iterator(); iter.hasNext(); ) {
+                Hash peer = iter.next();
                 buf.append(peer.toBase64()).append(" ");
             }
         }
         buf.append(" Pending: ");
         synchronized (_pendingPeers) {
             buf.append(_pendingPeers.size()).append(' ');
-            for (Iterator iter = _pendingPeers.iterator(); iter.hasNext(); ) {
-                Hash peer = (Hash)iter.next();
+            for (Iterator<Hash> iter = _pendingPeers.iterator(); iter.hasNext(); ) {
+                Hash peer = iter.next();
                 buf.append(peer.toBase64()).append(" ");
             }
         }
         buf.append(" Failed: ");
         synchronized (_failedPeers) {
             buf.append(_failedPeers.size()).append(' ');
-            for (Iterator iter = _failedPeers.iterator(); iter.hasNext(); ) {
-                Hash peer = (Hash)iter.next();
+            for (Iterator<Hash> iter = _failedPeers.iterator(); iter.hasNext(); ) {
+                Hash peer = iter.next();
                 buf.append(peer.toBase64()).append(" ");
             }
         }
         buf.append(" Successful: ");
         synchronized (_successfulPeers) {
             buf.append(_successfulPeers.size()).append(' ');
-            for (Iterator iter = _successfulPeers.iterator(); iter.hasNext(); ) {
-                Hash peer = (Hash)iter.next();
+            for (Iterator<Hash> iter = _successfulPeers.iterator(); iter.hasNext(); ) {
+                Hash peer = iter.next();
                 buf.append(peer.toBase64()).append(" ");
             }
         }
         buf.append(" Successful Exploratory: ");
         synchronized (_successfulExploratoryPeers) {
             buf.append(_successfulExploratoryPeers.size()).append(' ');
-            for (Iterator iter = _successfulExploratoryPeers.iterator(); iter.hasNext(); ) {
-                Hash peer = (Hash)iter.next();
+            for (Iterator<Hash> iter = _successfulExploratoryPeers.iterator(); iter.hasNext(); ) {
+                Hash peer = iter.next();
                 buf.append(peer.toBase64()).append(" ");
             }
         }
         return buf.toString();
     }
 }
 }
@@ -250,8 +250,4 @@ class PeerManager {
             return rv;
         }
     }
-
-    public void renderStatusHTML(Writer out) throws IOException {
-        _organizer.renderStatusHTML(out);
-    }
 }
@@ -78,8 +78,8 @@ public class PeerManagerFacadeImpl implements PeerManagerFacade {
         return _manager.getPeersByCapability(capability);
     }
-
+    /** @deprecated, moved to routerconsole */
     public void renderStatusHTML(Writer out) throws IOException {
         _manager.renderStatusHTML(out);
     }

 }
@@ -139,7 +139,7 @@ public class ProfileOrganizer {
     }

     public void setUs(Hash us) { _us = us; }
-    Hash getUs() { return _us; }
+    public Hash getUs() { return _us; }

     public double getSpeedThreshold() { return _thresholdSpeedValue; }
     public double getCapacityThreshold() { return _thresholdCapacityValue; }
@@ -258,11 +258,6 @@ public class ProfileOrganizer {
         _persistenceHelper.writeProfile(prof, out);
     }
-
-    public void renderStatusHTML(Writer out) throws IOException {
-        ProfileOrganizerRenderer rend = new ProfileOrganizerRenderer(this, _context);
-        rend.renderStatusHTML(out);
-    }

     /**
      * Return a set of Hashes for peers that are both fast and reliable. If an insufficient
      * number of peers are both fast and reliable, fall back onto high capacity peers, and if that
@@ -1,319 +0,0 @@
-package net.i2p.router.peermanager;
-
-import java.io.IOException;
-import java.io.Writer;
-import java.text.DecimalFormat;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.Set;
-import java.util.TreeSet;
-
-import net.i2p.data.DataHelper;
-import net.i2p.data.Hash;
-import net.i2p.data.RouterInfo;
-import net.i2p.router.RouterContext;
-import net.i2p.stat.Rate;
-import net.i2p.stat.RateStat;
-
-/**
- * Helper class to refactor the HTML rendering from out of the ProfileOrganizer
- *
- */
-class ProfileOrganizerRenderer {
-    private RouterContext _context;
-    private ProfileOrganizer _organizer;
-    private ProfileComparator _comparator;
-
-    public ProfileOrganizerRenderer(ProfileOrganizer organizer, RouterContext context) {
-        _context = context;
-        _organizer = organizer;
-        _comparator = new ProfileComparator();
-    }
-    public void renderStatusHTML(Writer out) throws IOException {
-        Set peers = _organizer.selectAllPeers();
-
-        long now = _context.clock().now();
-        long hideBefore = now - 90*60*1000;
-
-        TreeSet order = new TreeSet(_comparator);
-        TreeSet integratedPeers = new TreeSet(_comparator);
-        for (Iterator iter = peers.iterator(); iter.hasNext();) {
-            Hash peer = (Hash)iter.next();
-            if (_organizer.getUs().equals(peer)) continue;
-            PeerProfile prof = _organizer.getProfile(peer);
-            if (_organizer.isWellIntegrated(peer)) {
-                integratedPeers.add(prof);
-            } else {
-                RouterInfo info = _context.netDb().lookupRouterInfoLocally(peer);
-                if (info != null && info.getCapabilities().indexOf("f") >= 0)
-                    integratedPeers.add(prof);
-            }
-            if (prof.getLastSendSuccessful() <= hideBefore) continue;
-            order.add(prof);
-        }
-
-        int fast = 0;
-        int reliable = 0;
-        int integrated = 0;
-        int failing = 0;
-        StringBuilder buf = new StringBuilder(16*1024);
-        buf.append("<h2>Peer Profiles</h2>\n");
-        buf.append("<p>Showing ").append(order.size()).append(" recent profiles, hiding ").append(peers.size()-order.size()).append(" older profiles</p>");
-        buf.append("<table>" +
-                   "<tr>" +
-                   "<th>Peer</th>" +
-                   "<th>Groups (Caps)</th>" +
-                   "<th>Speed</th>" +
-                   "<th>Capacity</th>" +
-                   "<th>Integration</th>" +
-                   "<th>Status</th>" +
-                   "<th> </th>" +
-                   "</tr>");
-        int prevTier = 1;
-        for (Iterator iter = order.iterator(); iter.hasNext();) {
-            PeerProfile prof = (PeerProfile)iter.next();
-            Hash peer = prof.getPeer();
-
-            int tier = 0;
-            boolean isIntegrated = false;
-            if (_organizer.isFast(peer)) {
-                tier = 1;
-                fast++;
-                reliable++;
-            } else if (_organizer.isHighCapacity(peer)) {
-                tier = 2;
-                reliable++;
-            } else if (_organizer.isFailing(peer)) {
-                failing++;
-            } else {
-                tier = 3;
-            }
-
-            if (_organizer.isWellIntegrated(peer)) {
-                isIntegrated = true;
-                integrated++;
-            }
-
-            if (tier != prevTier)
-                buf.append("<tr><td colspan=\"7\"><hr></td></tr>\n");
-            prevTier = tier;
-
-            buf.append("<tr><td align=\"center\" nowrap>");
-            buf.append(_context.commSystem().renderPeerHTML(peer));
-            buf.append("</td><td align=\"center\">");
-
-            switch (tier) {
-                case 1: buf.append("Fast, High Capacity"); break;
-                case 2: buf.append("High Capacity"); break;
-                case 3: buf.append("Not Failing"); break;
-                default: buf.append("Failing"); break;
-            }
-            if (isIntegrated) buf.append(", Integrated");
-            RouterInfo info = _context.netDb().lookupRouterInfoLocally(peer);
-            if (info != null) {
-                // prevent HTML injection in the caps and version
-                buf.append(" (").append(DataHelper.stripHTML(info.getCapabilities()));
-                String v = info.getOption("router.version");
-                if (v != null)
-                    buf.append(' ').append(DataHelper.stripHTML(v));
-                buf.append(')');
-            }
-
-            buf.append("<td align=\"right\">").append(num(prof.getSpeedValue()));
-            long bonus = prof.getSpeedBonus();
-            if (bonus != 0) {
-                if (bonus > 0)
-                    buf.append(" (+");
-                else
-                    buf.append(" (");
-                buf.append(bonus).append(')');
-            }
-            buf.append("</td><td align=\"right\">").append(num(prof.getCapacityValue()));
-            bonus = prof.getCapacityBonus();
-            if (bonus != 0) {
-                if (bonus > 0)
-                    buf.append(" (+");
-                else
-                    buf.append(" (");
-                buf.append(bonus).append(')');
-            }
-            buf.append("</td><td align=\"right\">").append(num(prof.getIntegrationValue()));
-            buf.append("</td><td align=\"center\">");
-            if (_context.shitlist().isShitlisted(peer)) buf.append("Banned");
-            if (prof.getIsFailing()) buf.append(" Failing");
-            if (_context.commSystem().wasUnreachable(peer)) buf.append(" Unreachable");
-            Rate failed = prof.getTunnelHistory().getFailedRate().getRate(30*60*1000);
-            long fails = failed.getCurrentEventCount() + failed.getLastEventCount();
-            if (fails > 0) {
-                Rate accepted = prof.getTunnelCreateResponseTime().getRate(30*60*1000);
-                long total = fails + accepted.getCurrentEventCount() + accepted.getLastEventCount();
-                if (total / fails <= 10)   // hide if < 10%
-                    buf.append(' ').append(fails).append('/').append(total).append(" Test Fails");
-            }
-            buf.append(" </td>");
-            buf.append("<td nowrap align=\"center\"><a target=\"_blank\" href=\"dumpprofile.jsp?peer=").append(peer.toBase64().substring(0,6)).append("\">profile</a>");
-            buf.append(" <a href=\"configpeer.jsp?peer=").append(peer.toBase64()).append("\">+-</a></td>\n");
-            buf.append("</tr>");
-            // let's not build the whole page in memory (~500 bytes per peer)
-            out.write(buf.toString());
-            buf.setLength(0);
-        }
-        buf.append("</table>");
-
-        buf.append("<h2>Floodfill and Integrated Peers</h2>\n" +
-                   "<table>" +
-                   "<tr>" +
-                   "<th class=\"smallhead\">Peer</th>" +
-                   "<th class=\"smallhead\">Caps</th>" +
-                   "<th class=\"smallhead\">Integ. Value</th>" +
-                   "<th class=\"smallhead\">Last Heard About</th>" +
-                   "<th class=\"smallhead\">Last Heard From</th>" +
-                   // "<th class=\"smallhead\">Last Successful Send</th>" +
-                   "<th class=\"smallhead\">Last Good Send</th>" +
-                   // "<th class=\"smallhead\">Last Failed Send</th>" +
-                   "<th class=\"smallhead\">Last Bad Send</th>" +
-                   "<th class=\"smallhead\">10m Resp. Time</th>" +
-                   "<th class=\"smallhead\">1h Resp. Time</th>" +
-                   "<th class=\"smallhead\">1d Resp. Time</th>" +
-                   // "<th class=\"smallhead\">Successful Lookups</th>" +
-                   "<th class=\"smallhead\">Good Lookups</th>" +
-                   // "<th>Failed Lookups</th>" +
-                   "<th class=\"smallhead\">Bad Lookups</th>" +
-                   "<th class=\"smallhead\">New Stores</th>" +
-                   "<th class=\"smallhead\">Old Stores</th>" +
-                   "<th class=\"smallhead\">1h Fail Rate</th>" +
-                   "<th class=\"smallhead\">1d Fail Rate</th>" +
-                   "</tr>");
-        for (Iterator iter = integratedPeers.iterator(); iter.hasNext();) {
-            PeerProfile prof = (PeerProfile)iter.next();
-            Hash peer = prof.getPeer();
-
-            buf.append("<tr><td align=\"center\" nowrap>");
-            buf.append(_context.commSystem().renderPeerHTML(peer));
-            buf.append("</td>");
-            RouterInfo info = _context.netDb().lookupRouterInfoLocally(peer);
-            if (info != null)
-                buf.append("<td align=\"center\">").append(DataHelper.stripHTML(info.getCapabilities())).append("</td>");
-            else
-                buf.append("<td> </td>");
-            buf.append("</code></td>");
-            buf.append("<td align=\"right\">").append(num(prof.getIntegrationValue())).append("</td>");
-            long time;
-            time = now - prof.getLastHeardAbout();
-            buf.append("<td align=\"right\">").append(DataHelper.formatDuration(time)).append("</td>");
-            time = now - prof.getLastHeardFrom();
-            buf.append("<td align=\"right\">").append(DataHelper.formatDuration(time)).append("</td>");
-            time = now - prof.getLastSendSuccessful();
-            buf.append("<td align=\"right\">").append(DataHelper.formatDuration(time)).append("</td>");
-            time = now - prof.getLastSendFailed();
-            buf.append("<td align=\"right\">").append(DataHelper.formatDuration(time)).append("</td>");
-            buf.append("<td align=\"right\">").append(avg(prof, 10*60*1000l)).append("</td>");
-            buf.append("<td align=\"right\">").append(avg(prof, 60*60*1000l)).append("</td>");
-            buf.append("<td align=\"right\">").append(avg(prof, 24*60*60*1000l)).append("</td>");
-            DBHistory dbh = prof.getDBHistory();
-            if (dbh != null) {
-                buf.append("<td align=\"right\">").append(dbh.getSuccessfulLookups()).append("</td>");
-                buf.append("<td align=\"right\">").append(dbh.getFailedLookups()).append("</td>");
-                buf.append("<td align=\"right\">").append(dbh.getUnpromptedDbStoreNew()).append("</td>");
-                buf.append("<td align=\"right\">").append(dbh.getUnpromptedDbStoreOld()).append("</td>");
-                buf.append("<td align=\"right\">").append(davg(dbh, 60*60*1000l)).append("</td>");
-                buf.append("<td align=\"right\">").append(davg(dbh, 24*60*60*1000l)).append("</td>");
-            }
-        }
-        buf.append("</table>");
-
-        buf.append("<h3>Thresholds:</h3>");
-        buf.append("<b>Speed:</b> ").append(num(_organizer.getSpeedThreshold())).append(" (").append(fast).append(" fast peers)<br>");
-        buf.append("<b>Capacity:</b> ").append(num(_organizer.getCapacityThreshold())).append(" (").append(reliable).append(" high capacity peers)<br>");
-        buf.append("<b>Integration:</b> ").append(num(_organizer.getIntegrationThreshold())).append(" (").append(integrated).append(" well integrated peers)");
-        buf.append("<h3>Definitions:</h3><ul>" +
-                   "<li><b>groups</b>: as determined by the profile organizer</li>" +
-                   "<li><b>caps</b>: capabilities in the netDb, not used to determine profiles</li>" +
-                   "<li><b>speed</b>: peak throughput (bytes per second) over a 1 minute period that the peer has sustained in a single tunnel</li>" +
-                   "<li><b>capacity</b>: how many tunnels can we ask them to join in an hour?</li>" +
-                   "<li><b>integration</b>: how many new peers have they told us about lately?</li>" +
-                   "<li><b>status</b>: is the peer banned, or unreachable, or failing tunnel tests?</li>" +
-                   "</ul></i>");
-        out.write(buf.toString());
-        out.flush();
-    }
-
-    private class ProfileComparator implements Comparator {
-        public int compare(Object lhs, Object rhs) {
-            if ( (lhs == null) || (rhs == null) )
-                throw new NullPointerException("lhs=" + lhs + " rhs=" + rhs);
-            if ( !(lhs instanceof PeerProfile) || !(rhs instanceof PeerProfile) )
-                throw new ClassCastException("lhs=" + lhs.getClass().getName() + " rhs=" + rhs.getClass().getName());
-
-            PeerProfile left = (PeerProfile)lhs;
-            PeerProfile right = (PeerProfile)rhs;
-
-            if (_context.profileOrganizer().isFast(left.getPeer())) {
-                if (_context.profileOrganizer().isFast(right.getPeer())) {
-                    return compareHashes(left, right);
-                } else {
-                    return -1; // fast comes first
-                }
-            } else if (_context.profileOrganizer().isHighCapacity(left.getPeer())) {
-                if (_context.profileOrganizer().isFast(right.getPeer())) {
-                    return 1;
-                } else if (_context.profileOrganizer().isHighCapacity(right.getPeer())) {
-                    return compareHashes(left, right);
-                } else {
-                    return -1;
-                }
-            } else if (_context.profileOrganizer().isFailing(left.getPeer())) {
-                if (_context.profileOrganizer().isFailing(right.getPeer())) {
-                    return compareHashes(left, right);
-                } else {
-                    return 1;
-                }
-            } else {
-                // left is not failing
-                if (_context.profileOrganizer().isFast(right.getPeer())) {
-                    return 1;
-                } else if (_context.profileOrganizer().isHighCapacity(right.getPeer())) {
-                    return 1;
-                } else if (_context.profileOrganizer().isFailing(right.getPeer())) {
-                    return -1;
-                } else {
-                    return compareHashes(left, right);
-                }
-            }
-        }
-
-        private int compareHashes(PeerProfile left, PeerProfile right) {
-            return left.getPeer().toBase64().compareTo(right.getPeer().toBase64());
-        }
-
-    }
-
-    private final static DecimalFormat _fmt = new DecimalFormat("###,##0.00");
-    private final static String num(double num) { synchronized (_fmt) { return _fmt.format(num); } }
-    private final static String na = "n/a";
-
-    private static String avg (PeerProfile prof, long rate) {
-        RateStat rs = prof.getDbResponseTime();
-        if (rs == null)
-            return na;
-        Rate r = rs.getRate(rate);
-        if (r == null)
-            return na;
-        long c = r.getCurrentEventCount() + r.getLastEventCount();
-        if (c == 0)
-            return na;
-        double d = r.getCurrentTotalValue() + r.getLastTotalValue();
-        return Math.round(d/c) + "ms";
-    }
-
-    private static String davg (DBHistory dbh, long rate) {
-        RateStat rs = dbh.getFailedLookupRate();
-        if (rs == null)
-            return na;
-        Rate r = rs.getRate(rate);
-        if (r == null)
-            return na;
-        long c = r.getCurrentEventCount() + r.getLastEventCount();
-        return "" + c;
-    }
-}