foil evil typo plot

duck authored 2004-10-18 23:37:49 +00:00, committed by zzz
parent a95a968fa8
commit 81e0a145f1
17 changed files with 91 additions and 91 deletions

View File

@@ -274,9 +274,9 @@ public class PeerData {
_lostRate.addData(numTimedOut, 0);
_receiveRate.coallesceStats();
_sendRate.coallesceStats();
_lostRate.coallesceStats();
_receiveRate.coalesceStats();
_sendRate.coalesceStats();
_lostRate.coalesceStats();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer data cleaned up " + numTimedOut + " timed out pings and removed " + numDropped

View File

@@ -206,11 +206,11 @@ public class NetMonitor {
}
/** drop all the old summary data */
public void coallesceData() {
public void coalesceData() {
synchronized (_peerSummaries) {
for (Iterator iter = _peerSummaries.values().iterator(); iter.hasNext(); ) {
PeerSummary summary = (PeerSummary)iter.next();
summary.coallesceData(_summaryDurationHours * 60*60*1000);
summary.coalesceData(_summaryDurationHours * 60*60*1000);
}
}
}

View File

@@ -44,7 +44,7 @@ class NetMonitorRunner implements Runnable {
long nextExport = now + _monitor.getExportDelay() * 1000;
while (_monitor.isRunning()) {
now = Clock.getInstance().now();
_monitor.coallesceData();
_monitor.coalesceData();
if (now >= nextHarvest) {
runHarvest();
nextHarvest = now + _monitor.getHarvestDelay() * 1000;

View File

@@ -22,7 +22,7 @@ public class PeerSummary {
/** statName to a List of PeerStat elements (sorted by sample date, earliest first) */
private Map _stats;
/** lock on this when accessing stat data */
private Object _coallesceLock = new Object();
private Object _coalesceLock = new Object();
public PeerSummary(String peer) {
_peer = peer;
@@ -38,7 +38,7 @@ public class PeerSummary {
* @param val actual data harvested
*/
public void addData(String stat, String description, String valueDescriptions[], long when, double val[]) {
synchronized (_coallesceLock) {
synchronized (_coalesceLock) {
TreeMap stats = locked_getData(stat);
stats.put(new Long(when), new PeerStat(stat, description, valueDescriptions, when, val));
}
@@ -53,7 +53,7 @@ public class PeerSummary {
* @param val actual data harvested
*/
public void addData(String stat, String description, String valueDescriptions[], long when, long val[]) {
synchronized (_coallesceLock) {
synchronized (_coalesceLock) {
TreeMap stats = locked_getData(stat);
stats.put(new Long(when), new PeerStat(stat, description, valueDescriptions, when, val));
}
@@ -68,7 +68,7 @@ public class PeerSummary {
*
*/
public List getData(String statName) {
synchronized (_coallesceLock) {
synchronized (_coalesceLock) {
return new ArrayList(((TreeMap)_stats.get(statName)).values());
}
}
@@ -78,21 +78,21 @@ public class PeerSummary {
*
*/
public Set getStatNames() {
synchronized (_coallesceLock) {
synchronized (_coalesceLock) {
return new HashSet(_stats.keySet());
}
}
/** drop old data points */
public void coallesceData(long summaryDurationMs) {
public void coalesceData(long summaryDurationMs) {
long earliest = Clock.getInstance().now() - summaryDurationMs;
synchronized (_coallesceLock) {
locked_coallesce(earliest);
synchronized (_coalesceLock) {
locked_coalesce(earliest);
}
}
/** go through all the stats and remove ones from before the given date */
private void locked_coallesce(long earliestSampleDate) {
private void locked_coalesce(long earliestSampleDate) {
if (true) return;
for (Iterator iter = _stats.keySet().iterator(); iter.hasNext(); ) {
String statName = (String)iter.next();

View File

@@ -87,7 +87,7 @@ class PeerSummaryReader {
}
if (summary == null)
return;
summary.coallesceData(monitor.getSummaryDurationHours() * 60*60*1000);
summary.coalesceData(monitor.getSummaryDurationHours() * 60*60*1000);
monitor.addSummary(summary);
}

View File

@@ -26,10 +26,10 @@ public class FrequencyStat {
_frequencies[i].eventOccurred();
}
/** coallesce all the stats */
public void coallesceStats() {
/** coalesce all the stats */
public void coalesceStats() {
//for (int i = 0; i < _frequencies.length; i++)
// _frequencies[i].coallesceStats();
// _frequencies[i].coalesceStats();
}
public String getName() {

View File

@@ -27,11 +27,11 @@ public class Rate {
private volatile long _lifetimeEventCount;
private volatile long _lifetimeTotalEventTime;
private volatile long _lastCoallesceDate;
private volatile long _lastCoalesceDate;
private long _creationDate;
private long _period;
/** locked during coallesce and addData */
/** locked during coalesce and addData */
private Object _lock = new Object();
/** in the current (partial) period, what is the total value acrued through all events? */
@@ -94,9 +94,9 @@ public class Rate {
return _lifetimeTotalEventTime;
}
/** when was the rate last coallesced? */
public long getLastCoallesceDate() {
return _lastCoallesceDate;
/** when was the rate last coalesced? */
public long getLastCoalesceDate() {
return _lastCoalesceDate;
}
/** when was this rate created? */
@@ -130,7 +130,7 @@ public class Rate {
_lifetimeTotalEventTime = 0;
_creationDate = now();
_lastCoallesceDate = _creationDate;
_lastCoalesceDate = _creationDate;
_period = period;
}
@@ -175,23 +175,23 @@ public class Rate {
}
}
public void coallesce() {
public void coalesce() {
synchronized (_lock) {
long now = now();
long measuredPeriod = now - _lastCoallesceDate;
long measuredPeriod = now - _lastCoalesceDate;
if (measuredPeriod < _period) {
// no need to coallesce
// no need to coalesce
return;
}
// ok ok, lets coallesce
// ok ok, lets coalesce
// how much were we off by? (so that we can sample down the measured values)
double periodFactor = measuredPeriod / _period;
_lastTotalValue = (_currentTotalValue == 0 ? 0.0D : _currentTotalValue / periodFactor);
_lastEventCount = (_currentEventCount == 0 ? 0L : (long) (_currentEventCount / periodFactor));
_lastTotalEventTime = (_currentTotalEventTime == 0 ? 0L : (long) (_currentTotalEventTime / periodFactor));
_lastCoallesceDate = now;
_lastCoalesceDate = now;
if (_lastTotalValue > _extremeTotalValue) {
_extremeTotalValue = _lastTotalValue;
@@ -346,25 +346,25 @@ public class Rate {
PersistenceHelper.add(buf, prefix, ".period", "Number of milliseconds in the period", _period);
PersistenceHelper.add(buf, prefix, ".creationDate",
"When was this rate created? (milliseconds since the epoch, GMT)", _creationDate);
PersistenceHelper.add(buf, prefix, ".lastCoallesceDate",
"When did we last coallesce this rate? (milliseconds since the epoch, GMT)",
_lastCoallesceDate);
PersistenceHelper.add(buf, prefix, ".lastCoalesceDate",
"When did we last coalesce this rate? (milliseconds since the epoch, GMT)",
_lastCoalesceDate);
PersistenceHelper.add(buf, prefix, ".currentDate",
"When did this data get written? (milliseconds since the epoch, GMT)", now());
PersistenceHelper.add(buf, prefix, ".currentTotalValue",
"Total value of data points in the current (uncoallesced) period", _currentTotalValue);
"Total value of data points in the current (uncoalesced) period", _currentTotalValue);
PersistenceHelper
.add(buf, prefix, ".currentEventCount",
"How many events have occurred in the current (uncoallesced) period?", _currentEventCount);
"How many events have occurred in the current (uncoalesced) period?", _currentEventCount);
PersistenceHelper.add(buf, prefix, ".currentTotalEventTime",
"How many milliseconds have the events in the current (uncoallesced) period consumed?",
"How many milliseconds have the events in the current (uncoalesced) period consumed?",
_currentTotalEventTime);
PersistenceHelper.add(buf, prefix, ".lastTotalValue",
"Total value of data points in the most recent (coallesced) period", _lastTotalValue);
"Total value of data points in the most recent (coalesced) period", _lastTotalValue);
PersistenceHelper.add(buf, prefix, ".lastEventCount",
"How many events have occurred in the most recent (coallesced) period?", _lastEventCount);
"How many events have occurred in the most recent (coalesced) period?", _lastEventCount);
PersistenceHelper.add(buf, prefix, ".lastTotalEventTime",
"How many milliseconds have the events in the most recent (coallesced) period consumed?",
"How many milliseconds have the events in the most recent (coalesced) period consumed?",
_lastTotalEventTime);
PersistenceHelper.add(buf, prefix, ".extremeTotalValue",
"Total value of data points in the most extreme period", _extremeTotalValue);
@@ -395,7 +395,7 @@ public class Rate {
public void load(Properties props, String prefix, boolean treatAsCurrent) throws IllegalArgumentException {
_period = PersistenceHelper.getLong(props, prefix, ".period");
_creationDate = PersistenceHelper.getLong(props, prefix, ".creationDate");
_lastCoallesceDate = PersistenceHelper.getLong(props, prefix, ".lastCoallesceDate");
_lastCoalesceDate = PersistenceHelper.getLong(props, prefix, ".lastCoalesceDate");
_currentTotalValue = PersistenceHelper.getDouble(props, prefix, ".currentTotalValue");
_currentEventCount = PersistenceHelper.getLong(props, prefix, ".currentEventCount");
_currentTotalEventTime = PersistenceHelper.getLong(props, prefix, ".currentTotalEventTime");
@@ -409,17 +409,17 @@ public class Rate {
_lifetimeEventCount = PersistenceHelper.getLong(props, prefix, ".lifetimeEventCount");
_lifetimeTotalEventTime = PersistenceHelper.getLong(props, prefix, ".lifetimeTotalEventTime");
if (treatAsCurrent) _lastCoallesceDate = now();
if (treatAsCurrent) _lastCoalesceDate = now();
if (_period <= 0) throw new IllegalArgumentException("Period for " + prefix + " is invalid");
coallesce();
coalesce();
}
public boolean equals(Object obj) {
if ((obj == null) || (obj.getClass() != Rate.class)) return false;
Rate r = (Rate) obj;
return _period == r.getPeriod() && _creationDate == r.getCreationDate() &&
//_lastCoallesceDate == r.getLastCoallesceDate() &&
//_lastCoalesceDate == r.getLastCoalesceDate() &&
_currentTotalValue == r.getCurrentTotalValue() && _currentEventCount == r.getCurrentEventCount()
&& _currentTotalEventTime == r.getCurrentTotalEventTime() && _lastTotalValue == r.getLastTotalValue()
&& _lastEventCount == r.getLastEventCount() && _lastTotalEventTime == r.getLastTotalEventTime()
@@ -466,7 +466,7 @@ public class Rate {
}
rate.addData(i * 100, 20);
}
rate.coallesce();
rate.coalesce();
StringBuffer buf = new StringBuffer(1024);
try {
rate.store("rate.test", buf);

View File

@@ -41,10 +41,10 @@ public class RateStat {
_rates[i].addData(value, eventDuration);
}
/** coallesce all the stats */
public void coallesceStats() {
/** coalesce all the stats */
public void coalesceStats() {
for (int i = 0; i < _rates.length; i++)
_rates[i].coallesce();
_rates[i].coalesce();
}
public String getName() {
@@ -166,7 +166,7 @@ public class RateStat {
}
rs.addData(i * 100, 20);
}
rs.coallesceStats();
rs.coalesceStats();
java.io.ByteArrayOutputStream baos = new java.io.ByteArrayOutputStream(2048);
try {
rs.store(baos, "rateStat.test");

View File

@@ -94,12 +94,12 @@ public class StatManager {
if (stat != null) stat.addData(data, eventDuration);
}
public void coallesceStats() {
public void coalesceStats() {
synchronized (_frequencyStats) {
for (Iterator iter = _frequencyStats.values().iterator(); iter.hasNext();) {
FrequencyStat stat = (FrequencyStat)iter.next();
if (stat != null) {
stat.coallesceStats();
stat.coalesceStats();
}
}
}
@@ -107,7 +107,7 @@ public class StatManager {
for (Iterator iter = _rateStats.values().iterator(); iter.hasNext();) {
RateStat stat = (RateStat)iter.next();
if (stat != null) {
stat.coallesceStats();
stat.coalesceStats();
}
}
}

View File

@@ -185,7 +185,7 @@ public class Router {
setupHandlers();
startupQueue();
_context.jobQueue().addJob(new CoallesceStatsJob());
_context.jobQueue().addJob(new CoalesceStatsJob());
_context.jobQueue().addJob(new UpdateRoutingKeyModifierJob());
warmupCrypto();
_sessionKeyPersistenceHelper.startup();
@@ -261,18 +261,18 @@ public class Router {
}
/**
* coallesce the stats framework every minute
* coalesce the stats framework every minute
*
*/
private final class CoallesceStatsJob extends JobImpl {
public CoallesceStatsJob() {
private final class CoalesceStatsJob extends JobImpl {
public CoalesceStatsJob() {
super(Router.this._context);
Router.this._context.statManager().createRateStat("bw.receiveBps", "How fast we receive data", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
Router.this._context.statManager().createRateStat("bw.sendBps", "How fast we send data", "Bandwidth", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
}
public String getName() { return "Coallesce stats"; }
public String getName() { return "Coalesce stats"; }
public void runJob() {
Router.this._context.statManager().coallesceStats();
Router.this._context.statManager().coalesceStats();
RateStat receiveRate = _context.statManager().getRate("transport.receiveMessageSize");
if (receiveRate != null) {

View File

@@ -52,7 +52,7 @@ class KBucketSet {
int oldSize = _buckets[bucket].getKeyCount();
int numInBucket = _buckets[bucket].add(peer);
if (numInBucket > BUCKET_SIZE) {
// perhaps queue up coallesce job? naaahh.. lets let 'er grow for now
// perhaps queue up coalesce job? naaahh.. lets let 'er grow for now
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer " + peer + " added to bucket " + bucket);

View File

@@ -155,10 +155,10 @@ public class DBHistory {
public void setUnpromptedDbStoreNew(long num) { _unpromptedDbStoreNew = num; }
public void setUnpromptedDbStoreOld(long num) { _unpromptedDbStoreOld = num; }
public void coallesceStats() {
public void coalesceStats() {
_log.debug("Coallescing stats");
_failedLookupRate.coallesceStats();
_invalidReplyRate.coallesceStats();
_failedLookupRate.coalesceStats();
_invalidReplyRate.coalesceStats();
}
private final static String NL = System.getProperty("line.separator");

View File

@@ -11,7 +11,7 @@ import net.i2p.util.Log;
/**
* Run across all of the profiles, coallescing the stats and reorganizing them
* into appropriate groups. The stat coallesce must be run at least once a minute,
* into appropriate groups. The stat coalesce must be run at least once a minute,
* so if the group reorg wants to get changed, this may want to be split into two
* jobs.
*
@@ -34,14 +34,14 @@ class EvaluateProfilesJob extends JobImpl {
Hash peer = (Hash)iter.next();
PeerProfile profile = getContext().profileOrganizer().getProfile(peer);
if (profile != null)
profile.coallesceStats();
profile.coalesceStats();
}
long afterCoallesce = getContext().clock().now();
long afterCoalesce = getContext().clock().now();
getContext().profileOrganizer().reorganize();
long afterReorganize = getContext().clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Profiles coallesced and reorganized. total: " + allPeers.size() + ", selectAll: " + (afterSelect-start) + "ms, coallesce: " + (afterCoallesce-afterSelect) + "ms, reorganize: " + (afterReorganize-afterSelect));
_log.debug("Profiles coalesced and reorganized. total: " + allPeers.size() + ", selectAll: " + (afterSelect-start) + "ms, coalesce: " + (afterCoalesce-afterSelect) + "ms, reorganize: " + (afterReorganize-afterSelect));
} catch (Throwable t) {
_log.log(Log.CRIT, "Error evaluating profiles", t);
} finally {

View File

@@ -273,18 +273,18 @@ public class PeerProfile {
}
/** update the stats and rates (this should be called once a minute) */
public void coallesceStats() {
public void coalesceStats() {
if (!_expanded) return;
_commError.coallesceStats();
_dbIntroduction.coallesceStats();
_dbResponseTime.coallesceStats();
_receiveSize.coallesceStats();
_sendFailureSize.coallesceStats();
_sendSuccessSize.coallesceStats();
_tunnelCreateResponseTime.coallesceStats();
_tunnelTestResponseTime.coallesceStats();
_dbHistory.coallesceStats();
_tunnelHistory.coallesceStats();
_commError.coalesceStats();
_dbIntroduction.coalesceStats();
_dbResponseTime.coalesceStats();
_receiveSize.coalesceStats();
_sendFailureSize.coalesceStats();
_sendSuccessSize.coalesceStats();
_tunnelCreateResponseTime.coalesceStats();
_tunnelTestResponseTime.coalesceStats();
_dbHistory.coalesceStats();
_tunnelHistory.coalesceStats();
_speedValue = calculateSpeed();
_reliabilityValue = calculateReliability();
@@ -293,7 +293,7 @@ public class PeerProfile {
_isFailing = calculateIsFailing();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Coallesced: speed [" + _speedValue + "] reliability [" + _reliabilityValue + "] capacity [" + _capacityValue + "] integration [" + _integrationValue + "] failing? [" + _isFailing + "]");
_log.debug("Coalesced: speed [" + _speedValue + "] reliability [" + _reliabilityValue + "] capacity [" + _capacityValue + "] integration [" + _integrationValue + "] failing? [" + _isFailing + "]");
}
private double calculateSpeed() { return _context.speedCalculator().calc(this); }
@@ -347,7 +347,7 @@ public class PeerProfile {
buf.append("Could not load profile ").append(args[i]).append('\n');
continue;
}
//profile.coallesceStats();
//profile.coalesceStats();
buf.append("Peer " + profile.getPeer().toBase64()
+ ":\t Speed:\t" + fmt.format(profile.calculateSpeed())
+ " Reliability:\t" + fmt.format(profile.calculateReliability())

View File

@@ -129,7 +129,7 @@ public class ProfileOrganizer {
synchronized (_reorganizeLock) {
PeerProfile old = locked_getProfile(profile.getPeer());
profile.coallesceStats();
profile.coalesceStats();
locked_placeProfile(profile);
_strictCapacityOrder.add(profile);
return old;
@@ -383,7 +383,7 @@ public class ProfileOrganizer {
/**
* Place peers into the correct tier, as well as expand/contract and even drop profiles
* according to whatever limits are in place. Peer profiles are not coallesced during
* according to whatever limits are in place. Peer profiles are not coalesced during
* this method, but the averages are recalculated.
*
*/

View File

@@ -83,11 +83,11 @@ public class TunnelHistory {
public RateStat getRejectionRate() { return _rejectRate; }
public RateStat getFailedRate() { return _failRate; }
public void coallesceStats() {
public void coalesceStats() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Coallescing stats");
_rejectRate.coallesceStats();
_failRate.coallesceStats();
_rejectRate.coalesceStats();
_failRate.coalesceStats();
}
private final static String NL = System.getProperty("line.separator");

View File

@@ -209,7 +209,7 @@ public class TCPConnection {
/** how many Bps we are sending data to the peer (or 2KBps if we don't know) */
public long getSendRate() {
if (_sendRate == null) return 2*1024;
_sendRate.coallesceStats();
_sendRate.coalesceStats();
Rate r = _sendRate.getRate(60*1000);
if (r == null) {
return 2*1024;