2004-10-03 jrandom
* Add a new stat logging component to optionally dump the raw stats to disk as they are generated, rather than rely upon the summarized data. By default, this is off, but the router property "stat.logFilters" can be set to a comma-delimited list of stats (e.g. "client.sendAckTime") which will be written to the file "stats.log" (or whatever the property "stat.logFile" is set to). This can also log profile-related stats, such as "dbResponseTime" or "tunnelTestResponseTime".
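  For reference, turning the new raw logging on amounts to setting the two router properties named above. A minimal sketch (the property names and the default "stats.log" file come from this entry; the exact config file and its location depend on the installation):

      # log raw events for these stats as they are generated
      stat.logFilters=client.sendAckTime,dbResponseTime,tunnelTestResponseTime
      # optional: write somewhere other than the default stats.log
      stat.logFile=stats.log

  Leaving "stat.logFilters" unset keeps the feature off, so only the usual summarized rate data is kept.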
@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
  *
  */
 public class RouterVersion {
-    public final static String ID = "$Revision: 1.39 $ $Date: 2004/10/02 07:31:15 $";
+    public final static String ID = "$Revision: 1.40 $ $Date: 2004/10/02 14:05:24 $";
     public final static String VERSION = "0.4.1.1";
-    public final static long BUILD = 5;
+    public final static long BUILD = 6;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION);
         System.out.println("Router ID: " + RouterVersion.ID);
@@ -52,7 +52,7 @@ public class ClientManager {
         _log = context.logManager().getLog(ClientManager.class);
         _context.statManager().createRateStat("client.receiveMessageSize",
                                               "How large are messages received by the client?",
-                                              "Client Messages",
+                                              "ClientMessages",
                                               new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
         _runners = new HashMap();
         _pendingRunners = new HashSet();
@@ -100,14 +100,14 @@ public class OutboundClientMessageJob extends JobImpl {
         super(ctx);
         _log = ctx.logManager().getLog(OutboundClientMessageJob.class);
 
-        ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.sendAttemptAverage", "How many different tunnels do we have to try when sending a client message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.sendsPerFailure", "How many send attempts do we make when they all fail?", "Client Messages", new long[] { 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "Client Messages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.sendAttemptAverage", "How many different tunnels do we have to try when sending a client message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.sendAckTime", "How long does it take to get an ACK back from a message?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.sendsPerFailure", "How many send attempts do we make when they all fail?", "ClientMessages", new long[] { 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
 
         long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;
 
@@ -27,8 +27,8 @@ public class DatabaseLookupMessageHandler implements HandlerJobBuilder {
     public DatabaseLookupMessageHandler(RouterContext context) {
         _context = context;
         _log = context.logManager().getLog(DatabaseLookupMessageHandler.class);
-        _context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        _context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        _context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        _context.statManager().createRateStat("netDb.lookupsDropped", "How many netDb lookups did we drop due to throttling?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
     }
 
     public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash) {
@@ -51,8 +51,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
     public HandleDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
         super(ctx);
         _log = getContext().logManager().getLog(HandleDatabaseLookupMessageJob.class);
-        getContext().statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
         _message = receivedMessage;
         _from = from;
         _fromHash = fromHash;
@@ -38,7 +38,7 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
     public HandleDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash) {
         super(ctx);
         _log = ctx.logManager().getLog(HandleDatabaseStoreMessageJob.class);
-        ctx.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        ctx.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
         _message = receivedMessage;
         _from = from;
         _fromHash = fromHash;
@@ -82,15 +82,15 @@ class SearchJob extends JobImpl {
         _isLease = isLease;
         _peerSelector = new PeerSelector(getContext());
         _expiration = getContext().clock().now() + timeoutMs;
-        getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.searchMessageCount", "Overall number of mesages for all searches sent", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "Network Database", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.searchMessageCount", "Overall number of mesages for all searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Search (" + getClass().getName() + " for " + key.toBase64(), new Exception("Search enqueued by"));
     }
@@ -73,9 +73,9 @@ class StoreJob extends JobImpl {
                     DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set toSkip) {
         super(context);
         _log = context.logManager().getLog(StoreJob.class);
-        getContext().statManager().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.storePeers", "How many peers each netDb must be sent to before success?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        getContext().statManager().createRateStat("netDb.ackTime", "How long does it take for a peer to ack a netDb store?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.storePeers", "How many peers each netDb must be sent to before success?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.ackTime", "How long does it take for a peer to ack a netDb store?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
         _facade = facade;
         _state = new StoreState(getContext(), key, data, toSkip);
         _onSuccess = onSuccess;
@@ -28,10 +28,12 @@ public class DBHistory {
     private long _lastLookupReceived;
     private long _unpromptedDbStoreNew;
     private long _unpromptedDbStoreOld;
+    private String _statGroup;
 
-    public DBHistory(RouterContext context) {
+    public DBHistory(RouterContext context, String statGroup) {
         _context = context;
         _log = context.logManager().getLog(DBHistory.class);
+        _statGroup = statGroup;
         _successfulLookups = 0;
         _failedLookups = 0;
         _failedLookupRate = null;
@@ -45,7 +47,7 @@ public class DBHistory {
         _lastLookupReceived = -1;
         _unpromptedDbStoreNew = 0;
         _unpromptedDbStoreOld = 0;
-        createRates();
+        createRates(statGroup);
     }
 
     /** how many times we have sent them a db lookup and received the value back from them */
@@ -212,15 +214,17 @@ public class DBHistory {
             _invalidReplyRate.load(props, "dbHistory.invalidReplyRate", true);
         } catch (IllegalArgumentException iae) {
             _log.warn("DB History invalid reply rate is corrupt, resetting", iae);
-            createRates();
+            createRates(_statGroup);
         }
     }
 
-    private void createRates() {
+    private void createRates(String statGroup) {
         if (_failedLookupRate == null)
-            _failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer to respond to a lookup?", "dbHistory", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+            _failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer to respond to a lookup?", statGroup, new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
         if (_invalidReplyRate == null)
-            _invalidReplyRate = new RateStat("dbHistory.invalidReplyRate", "How often does this peer give us a bad (nonexistant, forged, etc) peer?", "dbHistory", new long[] { 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
+            _invalidReplyRate = new RateStat("dbHistory.invalidReplyRate", "How often does this peer give us a bad (nonexistant, forged, etc) peer?", statGroup, new long[] { 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        _failedLookupRate.setStatLog(_context.statManager().getStatLog());
+        _invalidReplyRate.setStatLog(_context.statManager().getStatLog());
     }
 
     private final static long getLong(Properties props, String key) {
@@ -45,9 +45,6 @@ public class PeerProfile {
     // does this peer profile contain expanded data, or just the basics?
     private boolean _expanded;
 
-    public PeerProfile(RouterContext context) {
-        this(context, null, true);
-    }
     public PeerProfile(RouterContext context, Hash peer) {
         this(context, peer, true);
     }
@@ -236,28 +233,37 @@ public class PeerProfile {
      *
      */
    public void expandProfile() {
+        String group = (null == _peer ? "profileUnknown" : _peer.toBase64().substring(0,6));
        if (_sendSuccessSize == null)
-            _sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", "profile", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+            _sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        if (_sendFailureSize == null)
-            _sendFailureSize = new RateStat("sendFailureSize", "How large messages that could not be sent were", "profile", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _sendFailureSize = new RateStat("sendFailureSize", "How large messages that could not be sent were", group, new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_receiveSize == null)
-            _receiveSize = new RateStat("receiveSize", "How large received messages are", "profile", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _receiveSize = new RateStat("receiveSize", "How large received messages are", group, new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_dbResponseTime == null)
-            _dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", "profile", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_tunnelCreateResponseTime == null)
-            _tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from the peer (in milliseconds)", "profile", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_tunnelTestResponseTime == null)
-            _tunnelTestResponseTime = new RateStat("tunnelTestResponseTime", "how long it takes to successfully test a tunnel this peer participates in (in milliseconds)", "profile", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _tunnelTestResponseTime = new RateStat("tunnelTestResponseTime", "how long it takes to successfully test a tunnel this peer participates in (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_commError == null)
-            _commError = new RateStat("commErrorRate", "how long between communication errors with the peer (e.g. disconnection)", "profile", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+            _commError = new RateStat("commErrorRate", "how long between communication errors with the peer (e.g. disconnection)", group, new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
        if (_dbIntroduction == null)
-            _dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", "profile", new long[] { 60*60*1000l, 24*60*60*1000l, 7*24*60*60*1000l });
+            _dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 24*60*60*1000l, 7*24*60*60*1000l });
 
        if (_tunnelHistory == null)
-            _tunnelHistory = new TunnelHistory(_context);
+            _tunnelHistory = new TunnelHistory(_context, group);
        if (_dbHistory == null)
-            _dbHistory = new DBHistory(_context);
+            _dbHistory = new DBHistory(_context, group);
 
+        _sendSuccessSize.setStatLog(_context.statManager().getStatLog());
+        _sendFailureSize.setStatLog(_context.statManager().getStatLog());
+        _receiveSize.setStatLog(_context.statManager().getStatLog());
+        _dbResponseTime.setStatLog(_context.statManager().getStatLog());
+        _tunnelCreateResponseTime.setStatLog(_context.statManager().getStatLog());
+        _tunnelTestResponseTime.setStatLog(_context.statManager().getStatLog());
+        _commError.setStatLog(_context.statManager().getStatLog());
+        _dbIntroduction.setStatLog(_context.statManager().getStatLog());
        _expanded = true;
    }
 
@@ -23,22 +23,26 @@ public class TunnelHistory {
     private volatile long _lastFailed;
     private RateStat _rejectRate;
     private RateStat _failRate;
+    private String _statGroup;
 
-    public TunnelHistory(RouterContext context) {
+    public TunnelHistory(RouterContext context, String statGroup) {
         _context = context;
         _log = context.logManager().getLog(TunnelHistory.class);
+        _statGroup = statGroup;
         _lifetimeAgreedTo = 0;
         _lifetimeFailed = 0;
         _lifetimeRejected = 0;
         _lastAgreedTo = 0;
         _lastFailed = 0;
         _lastRejected = 0;
-        createRates();
+        createRates(statGroup);
     }
 
-    private void createRates() {
-        _rejectRate = new RateStat("tunnelHistory.rejectRate", "How often does this peer reject a tunnel request?", "tunnelHistory", new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
-        _failRate = new RateStat("tunnelHistory.failRate", "How often do tunnels this peer accepts fail?", "tunnelHistory", new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
+    private void createRates(String statGroup) {
+        _rejectRate = new RateStat("tunnelHistory.rejectRate", "How often does this peer reject a tunnel request?", statGroup, new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        _failRate = new RateStat("tunnelHistory.failRate", "How often do tunnels this peer accepts fail?", statGroup, new long[] { 60*1000l, 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000l });
+        _rejectRate.setStatLog(_context.statManager().getStatLog());
+        _failRate.setStatLog(_context.statManager().getStatLog());
     }
 
     /** total tunnels the peer has agreed to participate in */
@@ -126,7 +130,7 @@ public class TunnelHistory {
                 _log.debug("Loading tunnelHistory.failRate");
         } catch (IllegalArgumentException iae) {
             _log.warn("TunnelHistory rates are corrupt, resetting", iae);
-            createRates();
+            createRates(_statGroup);
         }
     }
 