forked from I2P_Developers/i2p.i2p
Stats:
- Refactor handling of required stats
- Shorten description of required stats
- Improve save messages on configstats.jsp
- Change bw.sendBps and bw.receiveBps stats to bytes, not Kbytes
- Expires directive for graphs
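The recurring pattern in the diff below is switching stats that the console always needs (graphs, summary bar, watchdog) from createRateStat() to createRequiredRateStat(), which takes the same arguments, while shortening the description and adding units. A minimal sketch of the before/after call, taken from the JobQueueRunner hunk; the addRateData() call site and the lagMs variable are hypothetical and only illustrate that data points are fed the same way for both kinds of stat:

    // Old registration (optional stat, long description):
    // _context.statManager().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
    // New registration (required stat, short description with units):
    _context.statManager().createRequiredRateStat("jobQueue.jobLag", "Job run delay (ms)", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
    // Hypothetical feed site -- data points are added identically either way:
    _context.statManager().addRateData("jobQueue.jobLag", lagMs, 0);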
@@ -22,7 +22,7 @@ class JobQueueRunner implements Runnable {
  _log = _context.logManager().getLog(JobQueueRunner.class);
  _context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("jobQueue.jobRunSlow", "How long jobs that take over a second take", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
- _context.statManager().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+ _context.statManager().createRequiredRateStat("jobQueue.jobLag", "Job run delay (ms)", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sit on the job queue?", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
  //_context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
  //_state = 1;
@@ -1423,19 +1423,20 @@ private static class CoalesceStatsEvent implements SimpleTimer.TimedEvent {

  public CoalesceStatsEvent(RouterContext ctx) {
  _ctx = ctx;
- ctx.statManager().createRateStat("bw.receiveBps", "How fast we receive data (in KBps)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
- ctx.statManager().createRateStat("bw.sendBps", "How fast we send data (in KBps)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
- ctx.statManager().createRateStat("bw.sendRate", "Low level bandwidth send rate", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
- ctx.statManager().createRateStat("bw.recvRate", "Low level bandwidth receive rate", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
+ ctx.statManager().createRequiredRateStat("bw.receiveBps", "Message receive rate (Bytes/sec)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
+ // used in the router watchdog
+ ctx.statManager().createRequiredRateStat("bw.sendBps", "Message send rate (Bytes/sec)", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
+ ctx.statManager().createRequiredRateStat("bw.sendRate", "Low-level send rate (Bytes/sec)", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
+ ctx.statManager().createRequiredRateStat("bw.recvRate", "Low-level receive rate (Bytes/sec)", "Bandwidth", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
  ctx.statManager().createRateStat("router.activePeers", "How many peers we are actively talking with", "Throttle", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
  ctx.statManager().createRateStat("router.activeSendPeers", "How many peers we've sent to this minute", "Throttle", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
  ctx.statManager().createRateStat("router.highCapacityPeers", "How many high capacity peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
- ctx.statManager().createRateStat("router.fastPeers", "How many fast peers we know", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
+ ctx.statManager().createRequiredRateStat("router.fastPeers", "Known fast peers", "Throttle", new long[] { 5*60*1000, 60*60*1000 });
  _maxMemory = Runtime.getRuntime().maxMemory();
  String legend = "(Bytes)";
  if (_maxMemory < Long.MAX_VALUE)
  legend += " Max is " + DataHelper.formatSize(_maxMemory) + 'B';
- ctx.statManager().createRateStat("router.memoryUsed", legend, "Router", new long[] { 60*1000 });
+ ctx.statManager().createRequiredRateStat("router.memoryUsed", legend, "Router", new long[] { 60*1000 });
  }
  private RouterContext getContext() { return _ctx; }
  public void timeReached() {
@@ -1468,8 +1469,8 @@ private static class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
  Rate rate = receiveRate.getRate(60*1000);
  if (rate != null) {
  double bytes = rate.getLastTotalValue();
- double KBps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
- getContext().statManager().addRateData("bw.receiveBps", (long)KBps, 60*1000);
+ double bps = (bytes*1000.0d)/rate.getPeriod();
+ getContext().statManager().addRateData("bw.receiveBps", (long)bps, 60*1000);
  }
  }

@@ -1478,8 +1479,8 @@ private static class CoalesceStatsEvent implements SimpleTimer.TimedEvent {
  Rate rate = sendRate.getRate(60*1000);
  if (rate != null) {
  double bytes = rate.getLastTotalValue();
- double KBps = (bytes*1000.0d)/(rate.getPeriod()*1024.0d);
- getContext().statManager().addRateData("bw.sendBps", (long)KBps, 60*1000);
+ double bps = (bytes*1000.0d)/rate.getPeriod();
+ getContext().statManager().addRateData("bw.sendBps", (long)bps, 60*1000);
  }
  }
  }
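The two CoalesceStatsEvent hunks above also change what bw.receiveBps and bw.sendBps record, from KBytes/sec to Bytes/sec, by dropping the 1024 divisor. A worked example of the arithmetic, with illustrative values (not taken from the source):

    // 6,144,000 bytes counted over a 60-second rate period (period is in milliseconds).
    double bytes = 6144000.0d;
    long period = 60*1000;
    double oldKBps = (bytes*1000.0d)/(period*1024.0d);   // = 100.0    (old stat, KBytes/sec)
    double newBps  = (bytes*1000.0d)/period;             // = 102400.0 (new stat, Bytes/sec)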
@@ -131,7 +131,7 @@ public class RouterClock extends Clock {
  getLog().info("Updating target clock offset to " + offsetMs + "ms from " + _offset + "ms, Stratum " + stratum);

  if (!_statCreated) {
- _contextRC.statManager().createRateStat("clock.skew", "How far is the already adjusted clock being skewed?", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
+ _contextRC.statManager().createRequiredRateStat("clock.skew", "Clock step adjustment (ms)", "Clock", new long[] { 10*60*1000, 3*60*60*1000, 24*60*60*60 });
  _statCreated = true;
  }
  _contextRC.statManager().addRateData("clock.skew", delta, 0);
@@ -88,8 +88,8 @@ class RouterWatchdog implements Runnable {
  r = null;
  if (rs != null)
  r = rs.getRate(60*1000);
- double kbps = (r != null ? r.getAverageValue() : 0);
- _log.error("Outbound send rate: " + kbps + "KBps");
+ double bps = (r != null ? r.getAverageValue() : 0);
+ _log.error("Outbound send rate: " + bps + " Bps");
  long max = Runtime.getRuntime().maxMemory();
  long used = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
  _log.error("Memory: " + DataHelper.formatSize(used) + '/' + DataHelper.formatSize(max));
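As the comment added in the CoalesceStatsEvent hunk notes, bw.sendBps is read back by the router watchdog, and the hunk above updates that reader to log Bytes/sec. A minimal sketch of reading the 60-second average of a required stat, assuming a RouterContext ctx and a Log _log are in scope, and using only the RateStat/Rate calls that appear elsewhere in this diff:

    // Hedged sketch: look up the stat, take its 60-second rate, and read the average.
    RateStat rs = ctx.statManager().getRate("bw.sendBps");   // registered above as a required stat
    if (rs != null) {
        Rate r = rs.getRate(60*1000);
        if (r != null)
            _log.error("Outbound send rate: " + r.getAverageValue() + " Bps");
    }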
@@ -163,7 +163,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
  SimpleScheduler.getInstance().addPeriodicEvent(new OCMOSJCacheCleaner(ctx), CLEAN_INTERVAL, CLEAN_INTERVAL);
  ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
  ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
- ctx.statManager().createRateStat("client.sendAckTime", "Message round trip time", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ ctx.statManager().createRequiredRateStat("client.sendAckTime", "Message round trip time (ms)", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
  ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
  ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
  ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
@@ -48,7 +48,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
  _verifiesInProgress = new ConcurrentHashSet(8);
  _alwaysQuery = _context.getProperty("netDb.alwaysQuery");

- _context.statManager().createRateStat("netDb.successTime", "How long a successful search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
+ _context.statManager().createRequiredRateStat("netDb.successTime", "Time for successful lookup (ms)", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("netDb.failedAttemptedPeers", "How many peers we sent a search to when the search fails", "NetworkDatabase", new long[] { 60*1000l, 10*60*1000l });
  _context.statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
@@ -116,7 +116,7 @@ public class ProfileOrganizer {
  _context.statManager().createRateStat("peer.profilePlaceTime", "How long the reorg takes placing peers in the tiers", "Peers", new long[] { 10*60*1000 });
  _context.statManager().createRateStat("peer.profileReorgTime", "How long the reorg takes overall", "Peers", new long[] { 10*60*1000 });
  // used in DBHistory
- _context.statManager().createRateStat("peer.failedLookupRate", "DB Lookup fail rate", "Peers", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ _context.statManager().createRequiredRateStat("peer.failedLookupRate", "Net DB Lookup fail rate", "Peers", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
  }

  private void getReadLock() {
@@ -85,13 +85,6 @@ public class FIFOBandwidthLimiter {
  _context.statManager().createRateStat("bwLimiter.pendingInboundRequests", "How many inbound requests are ahead of the current one (ignoring ones with 0)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
  _context.statManager().createRateStat("bwLimiter.outboundDelayedTime", "How long it takes to honor an outbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
  _context.statManager().createRateStat("bwLimiter.inboundDelayedTime", "How long it takes to honor an inbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
- if (_log.shouldLog(Log.WARN)) {
- // If you want to see these you better have the logging set at startup!
- _context.statManager().createRateStat("bw.sendBps1s", "How fast we are transmitting for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
- _context.statManager().createRateStat("bw.recvBps1s", "How fast we are receiving for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
- _context.statManager().createRateStat("bw.sendBps15s", "How fast we are transmitting for the 15s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
- _context.statManager().createRateStat("bw.recvBps15s", "How fast we are receiving for the 15s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
- }
  _pendingInboundRequests = new ArrayList(16);
  _pendingOutboundRequests = new ArrayList(16);
  _lastTotalSent = _totalAllocatedOutboundBytes.get();
@@ -64,11 +64,11 @@ public abstract class TransportImpl implements Transport {
  _log = _context.logManager().getLog(TransportImpl.class);

  _context.statManager().createRateStat("transport.sendMessageFailureLifetime", "How long the lifetime of messages that fail are?", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
- _context.statManager().createRateStat("transport.sendMessageSize", "How large are the messages sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
- _context.statManager().createRateStat("transport.receiveMessageSize", "How large are the messages received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ _context.statManager().createRequiredRateStat("transport.sendMessageSize", "Size of sent messages (bytes)", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ _context.statManager().createRequiredRateStat("transport.receiveMessageSize", "Size of received messages (bytes)", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("transport.receiveMessageTime", "How long it takes to read a message?", "Transport", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("transport.receiveMessageTimeSlow", "How long it takes to read a message (when it takes more than a second)?", "Transport", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
- _context.statManager().createRateStat("transport.sendProcessingTime", "How long does it take from noticing that we want to send the message to having it completely sent (successfully or failed)?", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ _context.statManager().createRequiredRateStat("transport.sendProcessingTime", "Time to process and send a message (ms)", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("transport.expiredOnQueueLifetime", "How long a message that expires on our outbound queue is processed", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l } );
  _sendPool = new ArrayList(16);
  _unreachableEntries = new HashMap(16);
@@ -23,10 +23,11 @@ import net.i2p.util.Log;
  * Currently, the comm system doesn't even inject any lag, though it could (later).
  * It does honor the standard transport stats though, but not the TCP specific ones.
  *
  * FOR DEBUGGING AND LOCAL TESTING ONLY.
  */
  public class VMCommSystem extends CommSystemFacade {
- private Log _log;
- private RouterContext _context;
+ private final Log _log;
+ private final RouterContext _context;
  /**
  * Mapping from Hash to VMCommSystem for all routers hooked together
  */
@@ -36,15 +37,15 @@ public class VMCommSystem extends CommSystemFacade {
  _context = context;
  _log = context.logManager().getLog(VMCommSystem.class);
  _context.statManager().createFrequencyStat("transport.sendMessageFailureFrequency", "How often do we fail to send messages?", "Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
- _context.statManager().createRateStat("transport.sendMessageSize", "How large are the messages sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
- _context.statManager().createRateStat("transport.receiveMessageSize", "How large are the messages received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ _context.statManager().createRequiredRateStat("transport.sendMessageSize", "Size of sent messages (bytes)", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
+ _context.statManager().createRequiredRateStat("transport.receiveMessageSize", "Size of received messages (bytes)", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("transport.sendMessageSmall", "How many messages under 1KB are sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("transport.receiveMessageSmall", "How many messages under 1KB are received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("transport.sendMessageMedium", "How many messages between 1KB and 4KB are sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("transport.receiveMessageMedium", "How many messages between 1KB and 4KB are received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("transport.sendMessageLarge", "How many messages over 4KB are sent?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
  _context.statManager().createRateStat("transport.receiveMessageLarge", "How many messages over 4KB are received?", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
- _context.statManager().createRateStat("transport.sendProcessingTime", "How long does it take from noticing that we want to send the message to having it completely sent (successfully or failed)?", "Transport", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
+ _context.statManager().createRequiredRateStat("transport.sendProcessingTime", "Time to process and send a message (ms)", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
  }

  /**
@@ -64,7 +64,7 @@ class OutboundMessageFragments {
  _context.statManager().createRateStat("udp.sendSparse", "How many fragments were partially ACKed and hence not resent (time == message lifetime)", "udp", UDPTransport.RATES);
  _context.statManager().createRateStat("udp.sendPiggyback", "How many acks were piggybacked on a data packet (time == message lifetime)", "udp", UDPTransport.RATES);
  _context.statManager().createRateStat("udp.sendPiggybackPartial", "How many partial acks were piggybacked on a data packet (time == message lifetime)", "udp", UDPTransport.RATES);
- _context.statManager().createRateStat("udp.packetsRetransmitted", "Lifetime of packets during their retransmission (period == packets transmitted, lifetime)", "udp", UDPTransport.RATES);
+ _context.statManager().createRequiredRateStat("udp.packetsRetransmitted", "Lifetime of packets during retransmission (ms)", "udp", UDPTransport.RATES);
  _context.statManager().createRateStat("udp.peerPacketsRetransmitted", "How many packets have been retransmitted to the peer (lifetime) when a burst of packets are retransmitted (period == packets transmitted, lifetime)", "udp", UDPTransport.RATES);
  _context.statManager().createRateStat("udp.blockedRetransmissions", "How packets have been transmitted to the peer when we blocked a retransmission to them?", "udp", UDPTransport.RATES);
  _context.statManager().createRateStat("udp.sendCycleTime", "How long it takes to cycle through all of the active messages?", "udp", UDPTransport.RATES);
@@ -38,12 +38,12 @@ class UDPSender {
  _context.statManager().createRateStat("udp.sendQueueSize", "How many packets are queued on the UDP sender", "udp", UDPTransport.RATES);
  _context.statManager().createRateStat("udp.sendQueueFailed", "How often it was unable to add a new packet to the queue", "udp", UDPTransport.RATES);
  _context.statManager().createRateStat("udp.sendQueueTrimmed", "How many packets were removed from the queue for being too old (duration == remaining)", "udp", UDPTransport.RATES);
- _context.statManager().createRateStat("udp.sendPacketSize", "How large packets sent are", "udp", UDPTransport.RATES);
+ _context.statManager().createRequiredRateStat("udp.sendPacketSize", "Size of sent packets (bytes)", "udp", UDPTransport.RATES);
  _context.statManager().createRateStat("udp.socketSendTime", "How long the actual socket.send took", "udp", UDPTransport.RATES);
  _context.statManager().createRateStat("udp.sendBWThrottleTime", "How long the send is blocked by the bandwidth throttle", "udp", UDPTransport.RATES);
  _context.statManager().createRateStat("udp.sendACKTime", "How long an ACK packet is blocked for (duration == lifetime)", "udp", UDPTransport.RATES);
  // used in RouterWatchdog
- _context.statManager().createRateStat("udp.sendException", "How frequently we fail to send a packet (likely due to a windows exception)", "udp", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("udp.sendException", "Send fails (Windows exception?)", "udp", new long[] { 60*1000, 10*60*1000 });

  _context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_ACK, "ack-only packet size", "udp", UDPTransport.RATES);
  _context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_PUNCH, "hole punch packet size", "udp", UDPTransport.RATES);
@@ -114,9 +114,9 @@ public class FragmentHandler {
  "Tunnels", RATES);
  _context.statManager().createRateStat("tunnel.fragmentedComplete", "How many fragments were in a completely received message?",
  "Tunnels", RATES);
- _context.statManager().createRateStat("tunnel.fragmentedDropped", "How many fragments were in a partially received yet failed message?",
+ _context.statManager().createRequiredRateStat("tunnel.fragmentedDropped", "Number of dropped fragments",
  "Tunnels", RATES);
- _context.statManager().createRateStat("tunnel.corruptMessage", "How many corrupted messages arrived?",
+ _context.statManager().createRequiredRateStat("tunnel.corruptMessage", "Corrupt messages received",
  "Tunnels", RATES);
  }

@@ -57,9 +57,9 @@ public class TunnelDispatcher implements Service {
  _validator = null;
  _pumper = new TunnelGatewayPumper(ctx);
  _leaveJob = new LeaveTunnel(ctx);
- ctx.statManager().createRateStat("tunnel.participatingTunnels",
- "How many tunnels are we participating in?", "Tunnels",
- new long[] { 60*1000, 10*60*1000l, 60*60*1000l });
+ ctx.statManager().createRequiredRateStat("tunnel.participatingTunnels",
+ "Tunnels routed for others", "Tunnels",
+ new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
  ctx.statManager().createRateStat("tunnel.dispatchOutboundPeer",
  "How many messages we send out a tunnel targetting a peer?", "Tunnels",
  new long[] { 10*60*1000l, 60*60*1000l });
@@ -108,17 +108,17 @@ public class TunnelDispatcher implements Service {
  ctx.statManager().createRateStat("tunnel.dispatchOutboundZeroHopTime",
  "How long it takes to dispatch an outbound message through a zero hop tunnel", "Tunnels",
  new long[] { 60*1000l, 60*60*1000l });
- ctx.statManager().createRateStat("tunnel.participatingBandwidth",
- "Participating traffic", "Tunnels",
+ ctx.statManager().createRequiredRateStat("tunnel.participatingBandwidth",
+ "Participating traffic received (Bytes/sec)", "Tunnels",
  new long[] { 60*1000l, 60*10*1000l });
- ctx.statManager().createRateStat("tunnel.participatingBandwidthOut",
- "Participating traffic", "Tunnels",
+ ctx.statManager().createRequiredRateStat("tunnel.participatingBandwidthOut",
+ "Participating traffic sent (Bytes/sec)", "Tunnels",
  new long[] { 60*1000l, 60*10*1000l });
  ctx.statManager().createRateStat("tunnel.participatingMessageDropped",
  "Dropped for exceeding share limit", "Tunnels",
  new long[] { 60*1000l, 60*10*1000l });
- ctx.statManager().createRateStat("tunnel.participatingMessageCount",
- "How many messages are sent through a participating tunnel?", "Tunnels",
+ ctx.statManager().createRequiredRateStat("tunnel.participatingMessageCount",
+ "Number of 1KB participating messages", "Tunnels",
  new long[] { 60*1000l, 60*10*1000l, 60*60*1000l });
  ctx.statManager().createRateStat("tunnel.ownedMessageCount",
  "How many messages are sent through a tunnel we created (period == failures)?", "Tunnels",
@@ -52,13 +52,13 @@ class BuildExecutor implements Runnable {
  _recentlyBuildingMap = new ConcurrentHashMap(4 * MAX_CONCURRENT_BUILDS);
  _context.statManager().createRateStat("tunnel.concurrentBuilds", "How many builds are going at once", "Tunnels", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
  _context.statManager().createRateStat("tunnel.concurrentBuildsLagged", "How many builds are going at once when we reject further builds, due to job lag (period is lag)", "Tunnels", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
- _context.statManager().createRateStat("tunnel.buildExploratoryExpire", "How often an exploratory tunnel times out during creation", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
- _context.statManager().createRateStat("tunnel.buildClientExpire", "How often a client tunnel times out during creation", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
- _context.statManager().createRateStat("tunnel.buildExploratorySuccess", "Response time for success", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
- _context.statManager().createRateStat("tunnel.buildClientSuccess", "Response time for success", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
- _context.statManager().createRateStat("tunnel.buildExploratoryReject", "Response time for rejection", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
- _context.statManager().createRateStat("tunnel.buildClientReject", "Response time for rejection", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
- _context.statManager().createRateStat("tunnel.buildRequestTime", "How long it takes to build a tunnel request", "Tunnels", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.buildExploratoryExpire", "No response to our build request", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.buildClientExpire", "No response to our build request", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.buildExploratorySuccess", "Response time for success (ms)", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.buildClientSuccess", "Response time for success (ms)", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.buildExploratoryReject", "Response time for rejection (ms)", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.buildClientReject", "Response time for rejection (ms)", "Tunnels", new long[] { 10*60*1000, 60*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.buildRequestTime", "Time to build a tunnel request (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
  _context.statManager().createRateStat("tunnel.buildRequestZeroHopTime", "How long it takes to build a zero hop tunnel", "Tunnels", new long[] { 60*1000, 10*60*1000 });
  _context.statManager().createRateStat("tunnel.pendingRemaining", "How many inbound requests are pending after a pass (period is how long the pass takes)?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
  _context.statManager().createRateStat("tunnel.buildFailFirstHop", "How often we fail to build a OB tunnel because we can't contact the first hop", "Tunnels", new long[] { 60*1000, 10*60*1000 });
@@ -69,18 +69,18 @@ class BuildHandler {
  _context.statManager().createRateStat("tunnel.reject.30", "How often we reject a tunnel because of bandwidth overload", "Tunnels", new long[] { 60*1000, 10*60*1000 });
  _context.statManager().createRateStat("tunnel.reject.50", "How often we reject a tunnel because of a critical issue (shutdown, etc)", "Tunnels", new long[] { 60*1000, 10*60*1000 });

- _context.statManager().createRateStat("tunnel.decryptRequestTime", "How long it takes to decrypt a new tunnel build request", "Tunnels", new long[] { 60*1000, 10*60*1000 });
- _context.statManager().createRateStat("tunnel.rejectTimeout", "How often we reject a tunnel because we can't find the next hop", "Tunnels", new long[] { 60*1000, 10*60*1000 });
- _context.statManager().createRateStat("tunnel.rejectTimeout2", "How often we fail a tunnel because we can't contact the next hop", "Tunnels", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.decryptRequestTime", "Time to decrypt a build request (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.rejectTimeout", "Reject tunnel count (unknown next hop)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.rejectTimeout2", "Reject tunnel count (can't contact next hop)", "Tunnels", new long[] { 60*1000, 10*60*1000 });

- _context.statManager().createRateStat("tunnel.rejectOverloaded", "How long we had to wait before processing the request (when it was rejected)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
- _context.statManager().createRateStat("tunnel.acceptLoad", "Delay before processing the accepted request", "Tunnels", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.rejectOverloaded", "Delay to process rejected request (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.acceptLoad", "Delay to process accepted request (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
  _context.statManager().createRateStat("tunnel.dropConnLimits", "Drop instead of reject due to conn limits", "Tunnels", new long[] { 60*1000, 10*60*1000 });
- _context.statManager().createRateStat("tunnel.dropLoad", "How long we had to wait before finally giving up on an inbound request (period is queue count)?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
- _context.statManager().createRateStat("tunnel.dropLoadDelay", "How long we had to wait before finally giving up on an inbound request?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
- _context.statManager().createRateStat("tunnel.dropLoadBacklog", "How many requests were pending when they were so lagged that we had to drop a new inbound request??", "Tunnels", new long[] { 60*1000, 10*60*1000 });
- _context.statManager().createRateStat("tunnel.dropLoadProactive", "What the estimated queue time was when we dropped an inbound request (period is num pending)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
- _context.statManager().createRateStat("tunnel.dropLoadProactiveAbort", "How often we would have proactively dropped a request, but allowed it through?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.dropLoad", "Delay before dropping request (ms)?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.dropLoadDelay", "Delay before abandoning request (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.dropLoadBacklog", "Pending request count when dropped", "Tunnels", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.dropLoadProactive", "Delay estimate when dropped (ms)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
+ _context.statManager().createRequiredRateStat("tunnel.dropLoadProactiveAbort", "Allowed requests during load", "Tunnels", new long[] { 60*1000, 10*60*1000 });
  _context.statManager().createRateStat("tunnel.handleRemaining", "How many pending inbound requests were left on the queue after one pass?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
  _context.statManager().createRateStat("tunnel.buildReplyTooSlow", "How often a tunnel build reply came back after we had given up waiting for it?", "Tunnels", new long[] { 60*1000, 10*60*1000 });

@@ -34,7 +34,6 @@ class TestJob extends JobImpl {

  /** base to randomize the test delay on */
  private static final int TEST_DELAY = 30*1000;
- private static final long[] RATES = { 60*1000, 10*60*1000l, 60*60*1000l };

  public TestJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, TunnelPool pool) {
  super(ctx);
@@ -47,20 +46,7 @@ class TestJob extends JobImpl {
  if ( (_pool == null) && (_log.shouldLog(Log.ERROR)) )
  _log.error("Invalid tunnel test configuration: no pool for " + cfg, new Exception("origin"));
  getTiming().setStartAfter(getDelay() + ctx.clock().now());
- ctx.statManager().createRateStat("tunnel.testFailedTime", "How long did the failure take (max of 60s for full timeout)?", "Tunnels",
- RATES);
- ctx.statManager().createRateStat("tunnel.testExploratoryFailedTime", "How long did the failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
- RATES);
- ctx.statManager().createRateStat("tunnel.testFailedCompletelyTime", "How long did the complete failure take (max of 60s for full timeout)?", "Tunnels",
- RATES);
- ctx.statManager().createRateStat("tunnel.testExploratoryFailedCompletelyTime", "How long did the complete failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
- RATES);
- ctx.statManager().createRateStat("tunnel.testSuccessLength", "How long were the tunnels that passed the test?", "Tunnels",
- RATES);
- ctx.statManager().createRateStat("tunnel.testSuccessTime", "How long did tunnel testing take?", "Tunnels",
- RATES);
- ctx.statManager().createRateStat("tunnel.testAborted", "Tunnel test could not occur, since there weren't any tunnels to test with", "Tunnels",
- RATES);
+ // stats are created in TunnelPoolManager
  }

  public String getName() { return "Test tunnel"; }
@@ -80,7 +80,7 @@ public class TunnelPool {
  _context.clientManager().requestLeaseSet(_settings.getDestination(), ls);
  }
  _context.statManager().createRateStat(_rateName,
- "Tunnel Bandwidth", "Tunnels",
+ "Tunnel Bandwidth (Bytes/sec)", "Tunnels",
  new long[] { 5*60*1000l });
  }

@@ -599,7 +599,7 @@ public class TunnelPool {
  if (rs == null) {
  // Create the RateStat here rather than at the top because
  // the user could change the length settings while running
- _context.statManager().createRateStat(buildRateName(),
+ _context.statManager().createRequiredRateStat(buildRateName(),
  "Tunnel Build Frequency", "Tunnels",
  new long[] { TUNNEL_LIFETIME });
  rs = _context.statManager().getRate(buildRateName());
@@ -47,6 +47,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
  private TunnelPool _outboundExploratory;
  private final BuildExecutor _executor;
  private boolean _isShutdown;
+ private static final long[] RATES = { 60*1000, 10*60*1000l, 60*60*1000l };

  public TunnelPoolManager(RouterContext ctx) {
  _context = ctx;
@@ -66,12 +67,21 @@ public class TunnelPoolManager implements TunnelManagerFacade {
  execThread.setDaemon(true);
  execThread.start();

- ctx.statManager().createRateStat("tunnel.testSuccessTime",
- "How long do successful tunnel tests take?", "Tunnels",
- new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
- ctx.statManager().createRateStat("tunnel.participatingTunnels",
- "How many tunnels are we participating in?", "Tunnels",
- new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+ // The following are for TestJob
+ ctx.statManager().createRequiredRateStat("tunnel.testFailedTime", "Time for tunnel test failure (ms)", "Tunnels",
+ RATES);
+ ctx.statManager().createRateStat("tunnel.testExploratoryFailedTime", "How long did the failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
+ RATES);
+ ctx.statManager().createRateStat("tunnel.testFailedCompletelyTime", "How long did the complete failure take (max of 60s for full timeout)?", "Tunnels",
+ RATES);
+ ctx.statManager().createRateStat("tunnel.testExploratoryFailedCompletelyTime", "How long did the complete failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
+ RATES);
+ ctx.statManager().createRateStat("tunnel.testSuccessLength", "How long were the tunnels that passed the test?", "Tunnels",
+ RATES);
+ ctx.statManager().createRequiredRateStat("tunnel.testSuccessTime", "Time for tunnel test success (ms)", "Tunnels",
+ RATES);
+ ctx.statManager().createRateStat("tunnel.testAborted", "Tunnel test could not occur, since there weren't any tunnels to test with", "Tunnels",
+ RATES);
  }

  /** pick an inbound tunnel not bound to a particular destination */