2004-10-03 jrandom

* Add a new stat logging component to optionally dump the raw stats to
      disk as they are generated, rather than rely upon the summarized data.
      By default, this is off, but the router property "stat.logFilters" can
      be set to a comma-delimited list of stats (e.g. "client.sendAckTime")
      which will be written to the file "stats.log" (or whatever the property
      "stat.logFile" is set to).  This can also log profile-related stats,
      such as "dbResponseTime" or "tunnelTestResponseTime".
Author:    jrandom
Date:      2004-10-03 20:48:43 +00:00
Committed: zzz
Parent:    d9f0a0fd74
Commit:    98d2d661a8
16 changed files with 252 additions and 56 deletions

BufferedStatLog.java (new file)

@@ -0,0 +1,146 @@
package net.i2p.stat;

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.StringTokenizer;

import net.i2p.I2PAppContext;
import net.i2p.util.I2PThread;

/**
 *
 */
public class BufferedStatLog implements StatLog {
    private I2PAppContext _context;
    private List _events;
    /** flush stat events to disk after this many events (or 30s) */
    private int _flushFrequency;
    private List _statFilters;
    private BufferedWriter _out;
    private String _outFile;

    public BufferedStatLog(I2PAppContext ctx) {
        _context = ctx;
        _events = new ArrayList(1000);
        _statFilters = new ArrayList(10);
        _flushFrequency = 1000;
        I2PThread writer = new I2PThread(new StatLogWriter(), "StatLogWriter");
        writer.setDaemon(true);
        writer.start();
    }

    public void addData(String scope, String stat, long value, long duration) {
        synchronized (_events) {
            _events.add(new StatEvent(scope, stat, value, duration));
            if (_events.size() > _flushFrequency)
                _events.notifyAll();
        }
    }

    private boolean shouldLog(String stat) {
        synchronized (_statFilters) {
            return _statFilters.contains(stat);
        }
    }

    private void updateFilters() {
        String val = _context.getProperty("stat.logFilters");
        if (val != null) {
            StringTokenizer tok = new StringTokenizer(val, ",");
            synchronized (_statFilters) {
                _statFilters.clear();
                while (tok.hasMoreTokens())
                    _statFilters.add(tok.nextToken().trim());
            }
        } else {
            synchronized (_statFilters) { _statFilters.clear(); }
        }

        String filename = _context.getProperty("stat.logFile");
        if (filename == null)
            filename = "stats.log";
        if ( (_outFile != null) && (_outFile.equals(filename)) ) {
            // noop
        } else {
            if (_out != null) try { _out.close(); } catch (IOException ioe) {}
            _outFile = filename;
            try {
                _out = new BufferedWriter(new FileWriter(_outFile));
            } catch (IOException ioe) { ioe.printStackTrace(); }
        }
    }

    private class StatLogWriter implements Runnable {
        private SimpleDateFormat _fmt = new SimpleDateFormat("yyyyMMdd hh:mm:ss.SSS");
        public void run() {
            List cur = new ArrayList(1000);
            while (true) {
                synchronized (_events) {
                    if (_events.size() < _flushFrequency) {
                        try { _events.wait(30*1000); } catch (InterruptedException ie) {}
                    }
                    cur.addAll(_events);
                    _events.clear();
                }
                if (cur.size() > 0) {
                    writeEvents(cur);
                    cur.clear();
                }
            }
        }

        private void writeEvents(List events) {
            try {
                updateFilters();
                for (int i = 0; i < events.size(); i++) {
                    StatEvent evt = (StatEvent)events.get(i);
                    if (!shouldLog(evt.getStat())) continue;
                    String when = null;
                    synchronized (_fmt) {
                        when = _fmt.format(new Date(evt.getTime()));
                    }
                    _out.write(when);
                    _out.write(" ");
                    if (evt.getScope() == null)
                        _out.write("noScope ");
                    else
                        _out.write(evt.getScope() + " ");
                    _out.write(evt.getStat() + " ");
                    _out.write(evt.getValue() + " ");
                    _out.write(evt.getDuration() + "\n");
                }
                _out.flush();
            } catch (IOException ioe) {
                ioe.printStackTrace();
            }
        }
    }

    private class StatEvent {
        private long _time;
        private String _scope;
        private String _stat;
        private long _value;
        private long _duration;

        public StatEvent(String scope, String stat, long value, long duration) {
            _scope = scope;
            _stat = stat;
            _value = value;
            _duration = duration;
            _time = _context.clock().now();
        }
        public long getTime() { return _time; }
        public String getScope() { return _scope; }
        public String getStat() { return _stat; }
        public long getValue() { return _value; }
        public long getDuration() { return _duration; }
    }
}
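
Each line the writer emits is timestamp, scope (the stat's group name, or
"noScope"), stat name, value, and duration, space-separated. A hypothetical
filtered event would therefore come out roughly as:

    20041003 08:48:43.957 ClientMessages client.sendAckTime 1134 1134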

RateStat.java

@@ -19,6 +19,8 @@ public class RateStat {
    private String _description;
    /** actual rate objects for this statistic */
    private Rate _rates[];
+    /** component we tell about events as they occur */
+    private StatLog _statLog;
    public RateStat(String name, String description, String group, long periods[]) {
        _statName = name;
@@ -28,11 +30,13 @@ public class RateStat {
        for (int i = 0; i < periods.length; i++)
            _rates[i] = new Rate(periods[i]);
    }
+    public void setStatLog(StatLog sl) { _statLog = sl; }
    /**
     * update all of the rates for the various periods with the given value.
     */
    public void addData(long value, long eventDuration) {
+        if (_statLog != null) _statLog.addData(_groupName, _statName, value, eventDuration);
        for (int i = 0; i < _rates.length; i++)
            _rates[i].addData(value, eventDuration);
    }
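
A sketch of how the tee works in practice -- setStatLog, getStatLog, and
addData are from this commit, while ctx (a context exposing statManager())
and the sample values are assumed for illustration:

    RateStat rs = new RateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?",
                               "ClientMessages", new long[] { 60*1000l, 60*60*1000l });
    rs.setStatLog(ctx.statManager().getStatLog()); // tee raw events to the shared log
    rs.addData(1134, 1134); // forwarded to the StatLog first, then folded into each Rate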

StatLog.java (new file)

@@ -0,0 +1,8 @@
package net.i2p.stat;

/**
 * Component to be notified when a particular event occurs
 */
public interface StatLog {
    public void addData(String scope, String stat, long value, long duration);
}
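
Any StatLog implementation can be swapped in through StatManager.setStatLog(..)
(added in StatManager.java below). As a hypothetical sketch -- this class is not
part of the commit -- an implementation that dumps events to stdout instead of
buffering them to disk could be as small as:

    package net.i2p.stat;

    /** Hypothetical example: write each stat event to stdout, unbuffered and unfiltered */
    public class ConsoleStatLog implements StatLog {
        public void addData(String scope, String stat, long value, long duration) {
            System.out.println(scope + " " + stat + " " + value + " " + duration);
        }
    }

Installing it via setStatLog(new ConsoleStatLog()) also repoints every
already-registered RateStat, per the loop in setStatLog below.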

StatManager.java

@@ -27,6 +27,7 @@ public class StatManager {
    private Map _frequencyStats;
    /** stat name to RateStat */
    private Map _rateStats;
+    private StatLog _statLog;
    /**
     * The stat manager should only be constructed and accessed through the
@@ -39,6 +40,18 @@ public class StatManager {
        _context = context;
        _frequencyStats = Collections.synchronizedMap(new HashMap(128));
        _rateStats = Collections.synchronizedMap(new HashMap(128));
+        _statLog = new BufferedStatLog(context);
    }
+    public StatLog getStatLog() { return _statLog; }
+    public void setStatLog(StatLog log) {
+        _statLog = log;
+        synchronized (_rateStats) {
+            for (Iterator iter = _rateStats.values().iterator(); iter.hasNext(); ) {
+                RateStat rs = (RateStat)iter.next();
+                rs.setStatLog(log);
+            }
+        }
+    }
    /**
@@ -64,7 +77,9 @@ public class StatManager {
     */
    public void createRateStat(String name, String description, String group, long periods[]) {
        if (_rateStats.containsKey(name)) return;
-        _rateStats.put(name, new RateStat(name, description, group, periods));
+        RateStat rs = new RateStat(name, description, group, periods);
+        if (_statLog != null) rs.setStatLog(_statLog);
+        _rateStats.put(name, rs);
    }
    /** update the given frequency statistic, taking note that an event occurred (and recalculating all frequencies) */

history.txt

@@ -1,4 +1,13 @@
-$Id: history.txt,v 1.29 2004/10/02 07:31:16 jrandom Exp $
+$Id: history.txt,v 1.30 2004/10/02 14:05:24 jrandom Exp $
+
+2004-10-03 jrandom
+    * Add a new stat logging component to optionally dump the raw stats to
+      disk as they are generated, rather than rely upon the summarized data.
+      By default, this is off, but the router property "stat.logFilters" can
+      be set to a comma-delimited list of stats (e.g. "client.sendAckTime")
+      which will be written to the file "stats.log" (or whatever the property
+      "stat.logFile" is set to).  This can also log profile-related stats,
+      such as "dbResponseTime" or "tunnelTestResponseTime".
 
 2004-10-02 jrandom
     * Assure that we quickly fail messages bound for shitlisted peers.

RouterVersion.java

@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
 *
 */
public class RouterVersion {
-    public final static String ID = "$Revision: 1.39 $ $Date: 2004/10/02 07:31:15 $";
+    public final static String ID = "$Revision: 1.40 $ $Date: 2004/10/02 14:05:24 $";
    public final static String VERSION = "0.4.1.1";
-    public final static long BUILD = 5;
+    public final static long BUILD = 6;
    public static void main(String args[]) {
        System.out.println("I2P Router version: " + VERSION);
        System.out.println("Router ID: " + RouterVersion.ID);

ClientManager.java

@@ -52,7 +52,7 @@ public class ClientManager {
        _log = context.logManager().getLog(ClientManager.class);
        _context.statManager().createRateStat("client.receiveMessageSize",
                                              "How large are messages received by the client?",
-                                              "Client Messages",
+                                              "ClientMessages",
                                              new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        _runners = new HashMap();
        _pendingRunners = new HashSet();

OutboundClientMessageJob.java

@@ -100,14 +100,14 @@ public class OutboundClientMessageJob extends JobImpl {
        super(ctx);
        _log = ctx.logManager().getLog(OutboundClientMessageJob.class);
-        ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.sendAttemptAverage", "How many different tunnels do we have to try when sending a client message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.sendsPerFailure", "How many send attempts do we make when they all fail?", "Client Messages", new long[] { 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.sendAttemptAverage", "How many different tunnels do we have to try when sending a client message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.sendsPerFailure", "How many send attempts do we make when they all fail?", "ClientMessages", new long[] { 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;

DatabaseLookupMessageHandler.java

@@ -27,8 +27,8 @@ public class DatabaseLookupMessageHandler implements HandlerJobBuilder {
    public DatabaseLookupMessageHandler(RouterContext context) {
        _context = context;
        _log = context.logManager().getLog(DatabaseLookupMessageHandler.class);
-        _context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        _context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        _context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        _context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    }
    public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {

HandleDatabaseLookupMessageJob.java

@@ -51,8 +51,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
    public HandleDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
        super(ctx);
        _log = getContext().logManager().getLog(HandleDatabaseLookupMessageJob.class);
-        getContext().statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        _message = receivedMessage;
        _from = from;
        _fromHash = fromHash;

HandleDatabaseStoreMessageJob.java

@@ -38,7 +38,7 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
    public HandleDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash) {
        super(ctx);
        _log = ctx.logManager().getLog(HandleDatabaseStoreMessageJob.class);
-        ctx.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        _message = receivedMessage;
        _from = from;
        _fromHash = fromHash;

SearchJob.java

@@ -82,15 +82,15 @@ class SearchJob extends JobImpl {
        _isLease = isLease;
        _peerSelector = new PeerSelector(getContext());
        _expiration = getContext().clock().now() + timeoutMs;
-        getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.searchMessageCount", "Overall number of mesages for all searches sent", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.searchMessageCount", "Overall number of mesages for all searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Search (" + getClass().getName() + " for " + key.toBase64(), new Exception("Search enqueued by"));
    }

StoreJob.java

@@ -73,9 +73,9 @@ class StoreJob extends JobImpl {
                 DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set toSkip) {
        super(context);
        _log = context.logManager().getLog(StoreJob.class);
-        getContext().statManager().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.storePeers", "How many peers each netDb must be sent to before success?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.ackTime", "How long does it take for a peer to ack a netDb store?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.storePeers", "How many peers each netDb must be sent to before success?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.ackTime", "How long does it take for a peer to ack a netDb store?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        _facade = facade;
        _state = new StoreState(getContext(), key, data, toSkip);
        _onSuccess = onSuccess;

DBHistory.java

@@ -28,10 +28,12 @@ public class DBHistory {
    private long _lastLookupReceived;
    private long _unpromptedDbStoreNew;
    private long _unpromptedDbStoreOld;
+    private String _statGroup;
-    public DBHistory(RouterContext context) {
+    public DBHistory(RouterContext context, String statGroup) {
        _context = context;
        _log = context.logManager().getLog(DBHistory.class);
+        _statGroup = statGroup;
        _successfulLookups = 0;
        _failedLookups = 0;
        _failedLookupRate = null;
@@ -45,7 +47,7 @@ public class DBHistory {
        _lastLookupReceived = -1;
        _unpromptedDbStoreNew = 0;
        _unpromptedDbStoreOld = 0;
-        createRates();
+        createRates(statGroup);
    }
    /** how many times we have sent them a db lookup and received the value back from them */
@@ -212,15 +214,17 @@ public class DBHistory {
            _invalidReplyRate.load(props, "dbHistory.invalidReplyRate", true);
        } catch (IllegalArgumentException iae) {
            _log.warn("DB History invalid reply rate is corrupt, resetting", iae);
-            createRates();
+            createRates(_statGroup);
        }
    }
-    private void createRates() {
+    private void createRates(String statGroup) {
        if (_failedLookupRate == null)
-            _failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer to respond to a lookup?", "dbHistory", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+            _failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer to respond to a lookup?", statGroup, new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        if (_invalidReplyRate == null)
-            _invalidReplyRate = new RateStat("dbHistory.invalidReplyRate", "How often does this peer give us a bad (nonexistant, forged, etc) peer?", "dbHistory", new long[] { 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
+            _invalidReplyRate = new RateStat("dbHistory.invalidReplyRate", "How often does this peer give us a bad (nonexistant, forged, etc) peer?", statGroup, new long[] { 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        _failedLookupRate.setStatLog(_context.statManager().getStatLog());
+        _invalidReplyRate.setStatLog(_context.statManager().getStatLog());
    }
    private final static long getLong(Properties props, String key) {

PeerProfile.java

@@ -45,9 +45,6 @@ public class PeerProfile {
    // does this peer profile contain expanded data, or just the basics?
    private boolean _expanded;
-    public PeerProfile(RouterContext context) {
-        this(context, null, true);
-    }
    public PeerProfile(RouterContext context, Hash peer) {
        this(context, peer, true);
    }
@@ -236,28 +233,37 @@ public class PeerProfile {
     *
     */
    public void expandProfile() {
+        String group = (null == _peer ? "profileUnknown" : _peer.toBase64().substring(0,6));
        if (_sendSuccessSize == null)
-            _sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", "profile", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+            _sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        if (_sendFailureSize == null)
-            _sendFailureSize = new RateStat("sendFailureSize", "How large messages that could not be sent were", "profile", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _sendFailureSize = new RateStat("sendFailureSize", "How large messages that could not be sent were", group, new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_receiveSize == null)
-            _receiveSize = new RateStat("receiveSize", "How large received messages are", "profile", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _receiveSize = new RateStat("receiveSize", "How large received messages are", group, new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_dbResponseTime == null)
-            _dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", "profile", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_tunnelCreateResponseTime == null)
-            _tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from the peer (in milliseconds)", "profile", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_tunnelTestResponseTime == null)
-            _tunnelTestResponseTime = new RateStat("tunnelTestResponseTime", "how long it takes to successfully test a tunnel this peer participates in (in milliseconds)", "profile", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _tunnelTestResponseTime = new RateStat("tunnelTestResponseTime", "how long it takes to successfully test a tunnel this peer participates in (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_commError == null)
-            _commError = new RateStat("commErrorRate", "how long between communication errors with the peer (e.g. disconnection)", "profile", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _commError = new RateStat("commErrorRate", "how long between communication errors with the peer (e.g. disconnection)", group, new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_dbIntroduction == null)
-            _dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", "profile", new long[] { 60*60*1000l, 24*60*60*1000l, 7*24*60*60*1000l });
+            _dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 24*60*60*1000l, 7*24*60*60*1000l });
        if (_tunnelHistory == null)
-            _tunnelHistory = new TunnelHistory(_context);
+            _tunnelHistory = new TunnelHistory(_context, group);
        if (_dbHistory == null)
-            _dbHistory = new DBHistory(_context);
+            _dbHistory = new DBHistory(_context, group);
+        _sendSuccessSize.setStatLog(_context.statManager().getStatLog());
+        _sendFailureSize.setStatLog(_context.statManager().getStatLog());
+        _receiveSize.setStatLog(_context.statManager().getStatLog());
+        _dbResponseTime.setStatLog(_context.statManager().getStatLog());
+        _tunnelCreateResponseTime.setStatLog(_context.statManager().getStatLog());
+        _tunnelTestResponseTime.setStatLog(_context.statManager().getStatLog());
+        _commError.setStatLog(_context.statManager().getStatLog());
+        _dbIntroduction.setStatLog(_context.statManager().getStatLog());
        _expanded = true;
    }
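
With this change each peer's profile stats land in their own group, named by the
first six characters of the peer's base64 hash (or "profileUnknown" when no peer
is set), and that group name is what appears in the scope column of stats.log.
A hypothetical line for a peer whose hash happens to start with "wjOiBc":

    20041003 09:12:01.204 wjOiBc dbResponseTime 820 820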

TunnelHistory.java

@@ -23,22 +23,26 @@ public class TunnelHistory {
    private volatile long _lastFailed;
    private RateStat _rejectRate;
    private RateStat _failRate;
+    private String _statGroup;
-    public TunnelHistory(RouterContext context) {
+    public TunnelHistory(RouterContext context, String statGroup) {
        _context = context;
        _log = context.logManager().getLog(TunnelHistory.class);
+        _statGroup = statGroup;
        _lifetimeAgreedTo = 0;
        _lifetimeFailed = 0;
        _lifetimeRejected = 0;
        _lastAgreedTo = 0;
        _lastFailed = 0;
        _lastRejected = 0;
-        createRates();
+        createRates(statGroup);
    }
-    private void createRates() {
-        _rejectRate = new RateStat("tunnelHistory.rejectRate", "How often does this peer reject a tunnel request?", "tunnelHistory", new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        _failRate = new RateStat("tunnelHistory.failRate", "How often do tunnels this peer accepts fail?", "tunnelHistory", new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
+    private void createRates(String statGroup) {
+        _rejectRate = new RateStat("tunnelHistory.rejectRate", "How often does this peer reject a tunnel request?", statGroup, new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        _failRate = new RateStat("tunnelHistory.failRate", "How often do tunnels this peer accepts fail?", statGroup, new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        _rejectRate.setStatLog(_context.statManager().getStatLog());
+        _failRate.setStatLog(_context.statManager().getStatLog());
    }
    /** total tunnels the peer has agreed to participate in */
@@ -126,7 +130,7 @@ public class TunnelHistory {
            _log.debug("Loading tunnelHistory.failRate");
        } catch (IllegalArgumentException iae) {
            _log.warn("TunnelHistory rates are corrupt, resetting", iae);
-            createRates();
+            createRates(_statGroup);
        }
    }