* ProfileOrganizer:

      - Use more recent stats to calculate integration
      - Show that fast peers are also high-capacity on profiles.jsp
    * readme.html: Update Syndie link
    * TunnelPool: Update comments
    * netDb: Report 1-2h uptime as 90m to further frustrate tracking,
      get rid of the 60s tunnel stats
      (effective as of .33 to provide cover)
zzz
2008-03-13 23:13:32 +00:00
parent 4fa4357bf1
commit e7cdb965ba
9 changed files with 66 additions and 62 deletions

RouterVersion.java

@@ -17,7 +17,7 @@ import net.i2p.CoreVersion;
public class RouterVersion {
public final static String ID = "$Revision: 1.548 $ $Date: 2008-02-10 15:00:00 $";
public final static String VERSION = "0.6.1.32";
public final static long BUILD = 5;
public final static long BUILD = 6;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
System.out.println("Router ID: " + RouterVersion.ID);

StatisticsManager.java

@@ -128,6 +128,7 @@ public class StatisticsManager implements Service {
if (false)
stats.putAll(_context.profileManager().summarizePeers(_publishedStats));
boolean commentMeOutInDot33 = RouterVersion.VERSION.equals("0.6.1.32");
includeThroughput(stats);
//includeRate("router.invalidMessageTime", stats, new long[] { 10*60*1000 });
//includeRate("router.duplicateMessageId", stats, new long[] { 24*60*60*1000 });
@@ -162,7 +163,11 @@
//includeRate("transport.sendProcessingTime", stats, new long[] { 60*60*1000 });
//includeRate("jobQueue.jobRunSlow", stats, new long[] { 10*60*1000l, 60*60*1000l });
if (commentMeOutInDot33) { // get rid of 60s stats
includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*1000, 60*60*1000 });
} else {
includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*60*1000 });
}
includeRate("tunnel.participatingTunnels", stats, new long[] { 5*60*1000, 60*60*1000 });
//includeRate("tunnel.testSuccessTime", stats, new long[] { 10*60*1000l });
includeRate("client.sendAckTime", stats, new long[] { 60*60*1000 }, true);
@@ -174,12 +179,15 @@
//includeRate("stream.con.receiveDuplicateSize", stats, new long[] { 60*60*1000 });
// Round smaller uptimes to 1 hour, to frustrate uptime tracking
// Round 2nd hour to 90m since peers use 2h minimum to route
long publishedUptime = _context.router().getUptime();
if (publishedUptime < 60*60*1000) publishedUptime = 60*60*1000;
else if (publishedUptime < 2*60*60*1000 && !commentMeOutInDot33) publishedUptime = 90*60*1000;
stats.setProperty("stat_uptime", DataHelper.formatDuration(publishedUptime));
//stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");
if (commentMeOutInDot33) { // get rid of 60s stats
includeRate("tunnel.buildRequestTime", stats, new long[] { 60*1000, 10*60*1000 });
//includeRate("tunnel.decryptRequestTime", stats, new long[] { 60*1000, 10*60*1000 });
includeRate("tunnel.buildClientExpire", stats, new long[] { 60*1000, 10*60*1000 });
@@ -194,6 +202,18 @@
includeRate("tunnel.rejectOverloaded", stats, new long[] { 60*1000, 10*60*1000 });
includeRate("tunnel.acceptLoad", stats, new long[] { 60*1000, 10*60*1000 });
} else {
includeRate("tunnel.buildRequestTime", stats, new long[] { 10*60*1000 });
includeRate("tunnel.buildClientExpire", stats, new long[] { 10*60*1000 });
includeRate("tunnel.buildClientReject", stats, new long[] { 10*60*1000 });
includeRate("tunnel.buildClientSuccess", stats, new long[] { 10*60*1000 });
includeRate("tunnel.buildExploratoryExpire", stats, new long[] { 10*60*1000 });
includeRate("tunnel.buildExploratoryReject", stats, new long[] { 10*60*1000 });
includeRate("tunnel.buildExploratorySuccess", stats, new long[] { 10*60*1000 });
includeRate("tunnel.rejectTimeout", stats, new long[] { 10*60*1000 });
includeRate("tunnel.rejectOverloaded", stats, new long[] { 10*60*1000 });
includeRate("tunnel.acceptLoad", stats, new long[] { 10*60*1000 });
}
if (FloodfillNetworkDatabaseFacade.isFloodfill(_context.router().getRouterInfo())) {
stats.setProperty("netdb.knownRouters", ""+_context.netDb().getKnownRouters());

IntegrationCalculator.java

@@ -18,7 +18,11 @@ public class IntegrationCalculator extends Calculator {
}
public double calc(PeerProfile profile) {
// give more weight to recent counts
long val = profile.getDbIntroduction().getRate(24*60*60*1000l).getCurrentEventCount();
val += 2 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getCurrentEventCount();
val += 4 * 24 * profile.getDbIntroduction().getRate(60*60*1000l).getCurrentEventCount();
val /= 7;
val += profile.getIntegrationBonus();
return val;
}
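
A worked example of the new weighting (my reading of the factors above, not spelled out in the commit): the 6h count is scaled to a daily rate (x4) and given double weight, the 1h count is scaled to a daily rate (x24) and given quadruple weight, and the division by 7 normalizes the weights 1 + 2 + 4. With made-up counts:

// Hypothetical dbIntroduction counts for one peer, just to show the arithmetic.
long c24 = 70;   // events in the last 24h
long c6  = 30;   // events in the last 6h
long c1  = 10;   // events in the last 1h

long val = c24;          // daily count, weight 1                       ->   70
val += 2 * 4 * c6;       // 6h count scaled to a day (x4), weight 2     ->  240
val += 4 * 24 * c1;      // 1h count scaled to a day (x24), weight 4    ->  960
val /= 7;                // divide by the total weight 1 + 2 + 4
// val == 1270 / 7 == 181, versus 70 under the old 24h-only calculation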

PeerProfile.java

@@ -371,7 +371,7 @@ public class PeerProfile {
if (_commError == null)
_commError = new RateStat("commErrorRate", "how long between communication errors with the peer (e.g. disconnection)", group, new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_dbIntroduction == null)
_dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 24*60*60*1000l, 7*24*60*60*1000l });
_dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 6*60*60*1000l, 24*60*60*1000l });
if (_tunnelHistory == null)
_tunnelHistory = new TunnelHistory(_context, group);

ProfileOrganizerRenderer.java

@@ -104,7 +104,7 @@ class ProfileOrganizerRenderer {
buf.append("<td>");
switch (tier) {
case 1: buf.append("Fast"); break;
case 1: buf.append("Fast, High Capacity"); break;
case 2: buf.append("High Capacity"); break;
case 3: buf.append("Not Failing"); break;
default: buf.append("Failing"); break;

TunnelPool.java

@@ -484,15 +484,16 @@ public class TunnelPool {
/**
* This algorithm builds based on the previous average length of time it takes
* to build a tunnel. This average is kept in the _buildRateName stat.
* It is a separate stat for each pool, since in and out building use different methods,
* It is a separate stat for each type of pool, since in and out building use different methods,
* as do exploratory and client pools,
* and each pool can have separate length and length variance settings.
* We add one minute to the stat for safety.
* We add one minute to the stat for safety (two for exploratory tunnels).
*
* We linearly increase the number of builds per expiring tunnel from
* 1 to PANIC_FACTOR as the time-to-expire gets shorter.
*
* The stat will be 0 for first 10m of uptime so we will use the conservative algorithm
* further below instead. It will take about 30m of uptime to settle down.
* The stat will be 0 for first 10m of uptime so we will use the older, conservative algorithm
* below instead. This algorithm will take about 30m of uptime to settle down.
* Or, if we are building more than 33% of the time something is seriously wrong,
* we also use the conservative algorithm instead
*
@@ -515,7 +516,7 @@
avg = (int) ( TUNNEL_LIFETIME * r.getAverageValue() / wanted);
}
if (avg > 0 && avg < TUNNEL_LIFETIME / 3) {
if (avg > 0 && avg < TUNNEL_LIFETIME / 3) { // if we're taking less than 200s per tunnel to build
final int PANIC_FACTOR = 4; // how many builds to kick off when time gets short
avg += 60*1000; // one minute safety factor
if (_settings.isExploratory())
@@ -644,6 +645,7 @@
}
/**
* Helper function for the old conservative algorithm.
* This is the big scary function determining how many new tunnels we want to try to build at this
* point in time, as used by the BuildExecutor
*
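
A rough sketch of the build-scheduling idea the comment above describes (an illustration under assumptions, not the actual TunnelPool code; the exact ramp is outside this hunk): with avg as the average build time plus the safety margin, a tunnel whose remaining lifetime falls inside that window gets between 1 and PANIC_FACTOR parallel build attempts, more as expiry gets closer.

// Illustrative only; the method name and the linear ramp are assumptions based on the comment.
static int buildsForExpiringTunnel(long remainingMs, long avgBuildMs, int panicFactor) {
    if (remainingMs >= avgBuildMs)
        return 0;                                  // not inside the build window yet
    // fraction of the build window already used up, 0.0 (just entered) .. 1.0 (about to expire)
    double used = 1.0 - ((double) remainingMs / avgBuildMs);
    // linear ramp from 1 build up to panicFactor builds as time runs out
    return 1 + (int) Math.round(used * (panicFactor - 1));
}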