* ExploratoryPeerSelector: Try NonFailing even more
* HostsTxtNamingService: Add reverse lookup support
* Outbound message: Minor cleanup
* i2psnark TrackerClient: Minor cleanup
* checklist.txt: Minor edit
* hosts.txt: Add perv.i2p, false.i2p, mtn.i2p2.i2p
* i2ptunnel.config: Change CVS client to mtn
* netdb.jsp: Show leaseSet destinations using reverse lookup
* profiles.jsp: First cut at showing floodfill data
@@ -17,7 +17,7 @@ import net.i2p.CoreVersion;
 public class RouterVersion {
     public final static String ID = "$Revision: 1.548 $ $Date: 2008-02-10 15:00:00 $";
     public final static String VERSION = "0.6.1.32";
-    public final static long BUILD = 13;
+    public final static long BUILD = 14;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
         System.out.println("Router ID: " + RouterVersion.ID);
@@ -228,6 +228,9 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
      * simultaneously talking to the same dest is probably rare enough
      * to not bother separating out.
      *
+     * We're going to use the lease until it expires, not even looking for a newer lease.
+     * So if the inbound tunnel fails and the dest publishes a new lease, we won't know about it.
+     *
      * If not found,
      * fetch the next lease that we should try sending through, randomly chosen
      * from within the sorted leaseSet (NOT sorted by # of failures through each
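
Taken together, the comment above and the hunks that follow implement a small per-destination lease cache: reuse the cached lease until it expires, otherwise pick a lease at random from the shuffled leaseSet and cache that choice. Below is a condensed, illustrative sketch of that flow; it is not the actual method, it borrows the field names visible in this diff (_leaseCache, _lease, _to, _leaseSet) and assumes the usual LeaseSet accessors:

    // Illustrative sketch only - condensed from the hunks below, not the real method.
    private boolean selectLeaseSketch() {
        synchronized (_leaseCache) {
            _lease = (Lease) _leaseCache.get(_to);
            if (_lease != null && !_lease.isExpired())
                return true;                 // keep using it until it expires
            _leaseCache.remove(_to);         // expired or absent - choose again below
        }
        List leases = new ArrayList();
        for (int i = 0; i < _leaseSet.getLeaseCount(); i++)
            leases.add(_leaseSet.getLease(i));
        if (leases.isEmpty())
            return false;
        Collections.shuffle(leases);         // random choice, NOT ordered by failures
        _lease = (Lease) leases.get(0);
        synchronized (_leaseCache) {
            _leaseCache.put(_to, _lease);    // remember it for the next message to this dest
        }
        return true;
    }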
@@ -244,8 +247,9 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
             return false;
         }
         long now = getContext().clock().now();
 
         // Use the same lease if it's still good
+        // Even if _leaseSet changed, _leaseSet.getEncryptionKey() didn't...
         synchronized (_leaseCache) {
             if (now - _cleanTime > 5*60*1000) { // clean out periodically
                 cleanLeaseCache(_leaseCache);
@@ -254,12 +258,12 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
             _lease = (Lease) _leaseCache.get(_to);
             if (_lease != null) {
                 if (!_lease.isExpired()) {
-                    if (_log.shouldLog(Log.WARN))
-                        _log.warn("Found in cache - lease for dest " + _to.calculateHash().toBase64());
+                    if (_log.shouldLog(Log.INFO))
+                        _log.info("Found in cache - lease for " + _toString);
                     return true;
                 } else {
                     if (_log.shouldLog(Log.WARN))
-                        _log.warn("Expired from cache - lease for dest " + _to.calculateHash().toBase64());
+                        _log.warn("Expired from cache - lease for " + _toString);
                     _leaseCache.remove(_to);
                 }
             }
@@ -288,6 +292,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
             // sort are randomly ordered)
             Collections.shuffle(leases);
 
+            /****
             if (false) {
                 // ordered by lease number of failures
                 TreeMap orderedLeases = new TreeMap();
@@ -303,13 +308,14 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
 
                 _lease = (Lease)orderedLeases.get(orderedLeases.firstKey());
             } else {
+            ****/
                 _lease = (Lease)leases.get(0);
-            }
+            // }
             synchronized (_leaseCache) {
                 _leaseCache.put(_to, _lease);
             }
             if (_log.shouldLog(Log.WARN))
-                _log.warn("Added to cache - lease for dest " + _to.calculateHash().toBase64());
+                _log.warn("Added to cache - lease for " + _toString);
             return true;
         }
 
@@ -538,7 +544,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
         if (getContext().tunnelManager().isValidTunnel(_from.calculateHash(), tunnel)) {
             if (!getContext().commSystem().isBacklogged(tunnel.getPeer(1))) {
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("Switching back to tunnel " + tunnel + " for dest " + to.calculateHash().toBase64());
+                    _log.warn("Switching back to tunnel " + tunnel + " for " + _toString);
                 _backloggedTunnelCache.remove(to);
                 _tunnelCache.put(to, tunnel);
                 return tunnel;
@@ -554,7 +560,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
                 return tunnel;
             // backlogged
             if (_log.shouldLog(Log.WARN))
-                _log.warn("Switching from backlogged " + tunnel + " for dest " + to.calculateHash().toBase64());
+                _log.warn("Switching from backlogged " + tunnel + " for " + _toString);
             _backloggedTunnelCache.put(to, tunnel);
         } // else no longer valid
         _tunnelCache.remove(to);
@@ -21,9 +21,11 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
+
 import net.i2p.data.Base64;
 import net.i2p.data.DataFormatException;
 import net.i2p.data.DataHelper;
 import net.i2p.data.DataStructure;
+import net.i2p.data.Destination;
 import net.i2p.data.Hash;
 import net.i2p.data.Lease;
 import net.i2p.data.LeaseSet;
@@ -39,6 +41,7 @@ import net.i2p.router.networkdb.DatabaseLookupMessageHandler;
 import net.i2p.router.networkdb.DatabaseStoreMessageHandler;
 import net.i2p.router.networkdb.PublishLocalRouterInfoJob;
 import net.i2p.router.peermanager.PeerProfile;
+import net.i2p.router.TunnelPoolSettings;
 import net.i2p.util.Log;
 
 /**
@@ -934,8 +937,28 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         long now = _context.clock().now();
         for (Iterator iter = leases.iterator(); iter.hasNext(); ) {
             LeaseSet ls = (LeaseSet)iter.next();
-            Hash key = ls.getDestination().calculateHash();
-            buf.append("<b>LeaseSet: ").append(key.toBase64()).append("</b><br />\n");
+            Destination dest = ls.getDestination();
+            Hash key = dest.calculateHash();
+            buf.append("<b>LeaseSet: ").append(key.toBase64());
+            if (_context.clientManager().isLocal(dest)) {
+                buf.append(" (<a href=\"tunnels.jsp#" + key.toBase64().substring(0,4) + "\">Local</a> ");
+                if (! _context.clientManager().shouldPublishLeaseSet(key))
+                    buf.append("Unpublished ");
+                buf.append("Destination ");
+                TunnelPoolSettings in = _context.tunnelManager().getInboundSettings(key);
+                if (in != null && in.getDestinationNickname() != null)
+                    buf.append(in.getDestinationNickname());
+                else
+                    buf.append(dest.toBase64().substring(0, 6));
+            } else {
+                buf.append(" (Destination ");
+                String host = _context.namingService().reverseLookup(dest);
+                if (host != null)
+                    buf.append(host);
+                else
+                    buf.append(dest.toBase64().substring(0, 6));
+            }
+            buf.append(")</b><br />\n");
             long exp = ls.getEarliestLeaseDate()-now;
             if (exp > 0)
                 buf.append("Earliest expiration date in: <i>").append(DataHelper.formatDuration(exp)).append("</i><br />\n");
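
The else branch above relies on the reverse lookup this commit adds to the naming service; the HostsTxtNamingService change itself is not part of this excerpt. As a rough idea only, here is a minimal sketch of what a hosts.txt-style reverse lookup could look like, scanning hostname-to-Destination entries for a match; the entries parameter and method shape are assumptions, not the actual implementation:

    // Hypothetical sketch: given parsed hosts.txt entries (hostname -> Destination),
    // return the hostname that maps to this destination, or null if none does.
    public String reverseLookup(Destination dest, Map entries) {
        for (Iterator iter = entries.entrySet().iterator(); iter.hasNext(); ) {
            Map.Entry e = (Map.Entry) iter.next();
            if (dest.equals(e.getValue()))
                return (String) e.getKey();
        }
        return null;
    }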
@@ -3,6 +3,8 @@ package net.i2p.router.peermanager;
 import java.io.IOException;
 import java.io.Writer;
 
+import java.lang.Math;
+
 import java.text.DecimalFormat;
 import java.text.DecimalFormatSymbols;
 
@@ -12,9 +14,13 @@ import java.util.Locale;
 import java.util.Set;
 import java.util.TreeSet;
 
 import net.i2p.data.DataHelper;
 import net.i2p.data.Hash;
+import net.i2p.data.RouterInfo;
 import net.i2p.router.RouterContext;
+import net.i2p.router.peermanager.DBHistory;
+import net.i2p.stat.Rate;
+import net.i2p.stat.RateStat;
 
 /**
  * Helper class to refactor the HTML rendering from out of the ProfileOrganizer
@@ -33,13 +39,22 @@ class ProfileOrganizerRenderer {
     public void renderStatusHTML(Writer out) throws IOException {
         Set peers = _organizer.selectAllPeers();
 
-        long hideBefore = _context.clock().now() - 3*60*60*1000;
+        long now = _context.clock().now();
+        long hideBefore = now - 3*60*60*1000;
 
         TreeSet order = new TreeSet(_comparator);
+        TreeSet integratedPeers = new TreeSet(_comparator);
         for (Iterator iter = peers.iterator(); iter.hasNext();) {
             Hash peer = (Hash)iter.next();
             if (_organizer.getUs().equals(peer)) continue;
             PeerProfile prof = _organizer.getProfile(peer);
+            if (_organizer.isWellIntegrated(peer)) {
+                integratedPeers.add(prof);
+            } else {
+                RouterInfo info = _context.netDb().lookupRouterInfoLocally(peer);
+                if (info != null && info.getCapabilities().indexOf("f") >= 0)
+                    integratedPeers.add(prof);
+            }
             if (prof.getLastSendSuccessful() <= hideBefore) continue;
             order.add(prof);
         }
@@ -127,6 +142,75 @@ class ProfileOrganizerRenderer {
             buf.append("</tr>");
         }
         buf.append("</table>");
+
+        buf.append("<h2>Floodfill and Integrated Peers</h2>\n");
+        buf.append("<table border=\"1\">");
+        buf.append("<tr>");
+        buf.append("<td><b>Peer</b></td>");
+        buf.append("<td><b>Caps</b></td>");
+        buf.append("<td><b>Integ. Value</b></td>");
+        buf.append("<td><b>Last Heard About</b></td>");
+        buf.append("<td><b>Last Heard From</b></td>");
+        buf.append("<td><b>Last Successful Send</b></td>");
+        buf.append("<td><b>Last Failed Send</b></td>");
+        buf.append("<td><b>10m Resp. Time</b></td>");
+        buf.append("<td><b>1h Resp. Time</b></td>");
+        buf.append("<td><b>1d Resp. Time</b></td>");
+        buf.append("<td><b>Successful Lookups</b></td>");
+        buf.append("<td><b>Failed Lookups</b></td>");
+        buf.append("<td><b>New Stores</b></td>");
+        buf.append("<td><b>Old Stores</b></td>");
+        buf.append("<td><b>1m Fail Rate</b></td>");
+        buf.append("<td><b>1h Fail Rate</b></td>");
+        buf.append("<td><b>1d Fail Rate</b></td>");
+        buf.append("</tr>");
+        for (Iterator iter = integratedPeers.iterator(); iter.hasNext();) {
+            PeerProfile prof = (PeerProfile)iter.next();
+            Hash peer = prof.getPeer();
+
+            buf.append("<tr>");
+            buf.append("<td><code>");
+            if (prof.getIsFailing()) {
+                buf.append("<font color=\"red\">-- ").append(peer.toBase64().substring(0,6)).append("</font>");
+            } else {
+                if (prof.getIsActive()) {
+                    buf.append("<font color=\"blue\">++ ").append(peer.toBase64().substring(0,6)).append("</font>");
+                } else {
+                    buf.append(" ").append(peer.toBase64().substring(0,6));
+                }
+            }
+            RouterInfo info = _context.netDb().lookupRouterInfoLocally(peer);
+            if (info != null)
+                buf.append("<td align=\"center\">" + info.getCapabilities() + "</td>");
+            else
+                buf.append("<td> </td>");
+            buf.append("</code></td>");
+            buf.append("<td align=\"right\">").append(num(prof.getIntegrationValue())).append("</td>");
+            long time;
+            time = now - prof.getLastHeardAbout();
+            buf.append("<td align=\"right\">").append(DataHelper.formatDuration(time)).append("</td>");
+            time = now - prof.getLastHeardFrom();
+            buf.append("<td align=\"right\">").append(DataHelper.formatDuration(time)).append("</td>");
+            time = now - prof.getLastSendSuccessful();
+            buf.append("<td align=\"right\">").append(DataHelper.formatDuration(time)).append("</td>");
+            time = now - prof.getLastSendFailed();
+            buf.append("<td align=\"right\">").append(DataHelper.formatDuration(time)).append("</td>");
+            buf.append("<td align=\"right\">").append(avg(prof, 10*60*1000l)).append("</td>");
+            buf.append("<td align=\"right\">").append(avg(prof, 60*60*1000l)).append("</td>");
+            buf.append("<td align=\"right\">").append(avg(prof, 24*60*60*1000l)).append("</td>");
+            DBHistory dbh = prof.getDBHistory();
+            if (dbh != null) {
+                buf.append("<td align=\"right\">").append(dbh.getSuccessfulLookups()).append("</td>");
+                buf.append("<td align=\"right\">").append(dbh.getFailedLookups()).append("</td>");
+                buf.append("<td align=\"right\">").append(dbh.getUnpromptedDbStoreNew()).append("</td>");
+                buf.append("<td align=\"right\">").append(dbh.getUnpromptedDbStoreOld()).append("</td>");
+                buf.append("<td align=\"right\">").append(davg(dbh, 60*1000l)).append("</td>");
+                buf.append("<td align=\"right\">").append(davg(dbh, 60*60*1000l)).append("</td>");
+                buf.append("<td align=\"right\">").append(davg(dbh, 24*60*60*1000l)).append("</td>");
+            }
+        }
+        buf.append("</table>");
+
         buf.append("<p><i>Definitions:<ul>");
         buf.append("<li><b>groups</b>: as determined by the profile organizer</li>");
         buf.append("<li><b>caps</b>: capabilities in the netDb, not used to determine profiles</li>");
@@ -198,4 +282,29 @@ class ProfileOrganizerRenderer {
 
     private final static DecimalFormat _fmt = new DecimalFormat("###,##0.00", new DecimalFormatSymbols(Locale.UK));
     private final static String num(double num) { synchronized (_fmt) { return _fmt.format(num); } }
+
+    String avg (PeerProfile prof, long rate) {
+        RateStat rs = prof.getDbResponseTime();
+        if (rs == null)
+            return num(0d);
+        Rate r = rs.getRate(rate);
+        if (r == null)
+            return num(0d);
+        long c = r.getCurrentEventCount() + r.getLastEventCount();
+        if (c == 0)
+            return num(0d);
+        double d = r.getCurrentTotalValue() + r.getLastTotalValue();
+        return Math.round(d/c) + "ms";
+    }
+
+    String davg (DBHistory dbh, long rate) {
+        RateStat rs = dbh.getFailedLookupRate();
+        if (rs == null)
+            return num(0d);
+        Rate r = rs.getRate(rate);
+        if (r == null)
+            return num(0d);
+        long c = r.getCurrentEventCount() + r.getLastEventCount();
+        return "" + c;
+    }
 }
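
For reference, avg() above merges the current and previous buckets of the response-time Rate before averaging, and davg() simply reports the combined event count of the failed-lookup Rate. With illustrative numbers (not taken from a real router): a current bucket of 3 events totaling 1200 ms plus a last bucket of 2 events totaling 800 ms gives (1200 + 800) / (3 + 2) = 400, rendered as "400ms":

    // Illustrative only: how avg() combines the two Rate buckets.
    double currentTotal = 1200; long currentEvents = 3;   // current measurement period
    double lastTotal    =  800; long lastEvents    = 2;   // previous measurement period
    String rendered = Math.round((currentTotal + lastTotal) / (currentEvents + lastEvents)) + "ms";  // "400ms"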
@@ -77,7 +77,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
             failPct = getExploratoryFailPercentage(ctx);
             Log l = ctx.logManager().getLog(getClass());
             if (l.shouldLog(Log.DEBUG))
-                l.debug("Fail pct: " + failPct);
+                l.debug("Normalized Fail pct: " + failPct);
             // always try a little, this helps keep the failPct stat accurate too
             if (failPct > 100 - MIN_NONFAILING_PCT)
                 failPct = 100 - MIN_NONFAILING_PCT;
@@ -86,14 +86,32 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
     }
 
     // We should really use the difference between the exploratory fail rate
-    // and the client fail rate.
-    // (return 100 * ((Efail - Cfail) / (1 - Cfail)))
+    // and the high capacity fail rate - but we don't have a stat for high cap,
+    // so use the fast (== client) fail rate, it should be close
+    // if the expl. and client tunnel lengths aren't too different.
+    // So calculate the difference between the exploratory fail rate
+    // and the client fail rate, normalized to 100:
+    // 100 * ((Efail - Cfail) / (100 - Cfail))
+    // Even this isn't the "true" rate for the NonFailingPeers pool, since we
+    // are often building exploratory tunnels using the HighCapacity pool.
    private int getExploratoryFailPercentage(RouterContext ctx) {
-        int timeout = getEvents(ctx, "tunnel.buildExploratoryExpire", 10*60*1000);
-        int reject = getEvents(ctx, "tunnel.buildExploratoryReject", 10*60*1000);
-        int accept = getEvents(ctx, "tunnel.buildExploratorySuccess", 10*60*1000);
+        int c = getFailPercentage(ctx, "Client");
+        int e = getFailPercentage(ctx, "Exploratory");
+        Log l = ctx.logManager().getLog(getClass());
+        if (l.shouldLog(Log.DEBUG))
+            l.debug("Client, Expl. Fail pct: " + c + ", " + e);
+        if (e <= c || e <= 25) // doing very well (unlikely)
+            return 0;
+        if (c >= 90) // doing very badly
+            return 100 - MIN_NONFAILING_PCT;
+        return (100 * (e-c)) / (100-c);
+    }
+
+    private int getFailPercentage(RouterContext ctx, String t) {
+        String pfx = "tunnel.build" + t;
+        int timeout = getEvents(ctx, pfx + "Expire", 10*60*1000);
+        int reject = getEvents(ctx, pfx + "Reject", 10*60*1000);
+        int accept = getEvents(ctx, pfx + "Success", 10*60*1000);
         if (accept + reject + timeout <= 0)
             return 0;
         double pct = (double)(reject + timeout) / (accept + reject + timeout);
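
As a quick check of the normalization described in the comment at the top of the previous hunk (illustrative numbers only): with a client fail rate of 20% and an exploratory fail rate of 60%, the result is 100 * (60 - 20) / (100 - 20) = 50, which the caller shown in the earlier ExploratoryPeerSelector hunk then caps at 100 - MIN_NONFAILING_PCT:

    // Illustrative only: the same normalization as getExploratoryFailPercentage(), with sample values.
    int c = 20;                                     // client (fast) tunnel fail percentage
    int e = 60;                                     // exploratory tunnel fail percentage
    int normalized = (100 * (e - c)) / (100 - c);   // = 50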