propagate from branch 'i2p.i2p.zab.782' (head 64415601890b9c494a8f06379f9feefbc855e07c)
to branch 'i2p.i2p' (head 0e92cf3a3844e7b738ca9c2486112867fc663b6f)

@@ -16,6 +16,7 @@ import net.i2p.router.peermanager.DBHistory;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.router.peermanager.ProfileOrganizer;
import net.i2p.stat.Rate;
import net.i2p.stat.RateAverages;
import net.i2p.stat.RateStat;

/**
@@ -171,11 +172,12 @@ class ProfileOrganizerRenderer {
if (_context.banlist().isBanlisted(peer)) buf.append(_("Banned"));
if (prof.getIsFailing()) buf.append(' ').append(_("Failing"));
if (_context.commSystem().wasUnreachable(peer)) buf.append(' ').append(_("Unreachable"));
RateAverages ra = RateAverages.getTemp();
Rate failed = prof.getTunnelHistory().getFailedRate().getRate(30*60*1000);
long fails = failed.getCurrentEventCount() + failed.getLastEventCount();
long fails = failed.computeAverages(ra, false).getTotalEventCount();
if (fails > 0) {
Rate accepted = prof.getTunnelCreateResponseTime().getRate(30*60*1000);
long total = fails + accepted.getCurrentEventCount() + accepted.getLastEventCount();
long total = fails + accepted.computeAverages(ra, false).getTotalEventCount();
if (total / fails <= 10) // hide if < 10%
buf.append(' ').append(fails).append('/').append(total).append(' ').append(_("Test Fails"));
}
@@ -218,6 +220,7 @@ class ProfileOrganizerRenderer {
buf.append("<th class=\"smallhead\">").append(_("1h Fail Rate")).append("</th>");
buf.append("<th class=\"smallhead\">").append(_("1d Fail Rate")).append("</th>");
buf.append("</tr>");
RateAverages ra = RateAverages.getTemp();
for (Iterator<PeerProfile> iter = integratedPeers.iterator(); iter.hasNext();) {
PeerProfile prof = iter.next();
Hash peer = prof.getPeer();
@@ -240,9 +243,9 @@ class ProfileOrganizerRenderer {
buf.append("<td align=\"right\">").append(DataHelper.formatDuration2(time)).append("</td>");
time = now - prof.getLastSendFailed();
buf.append("<td align=\"right\">").append(DataHelper.formatDuration2(time)).append("</td>");
buf.append("<td align=\"right\">").append(avg(prof, 10*60*1000l)).append("</td>");
buf.append("<td align=\"right\">").append(avg(prof, 60*60*1000l)).append("</td>");
buf.append("<td align=\"right\">").append(avg(prof, 24*60*60*1000l)).append("</td>");
buf.append("<td align=\"right\">").append(avg(prof, 10*60*1000l, ra)).append("</td>");
buf.append("<td align=\"right\">").append(avg(prof, 60*60*1000l, ra)).append("</td>");
buf.append("<td align=\"right\">").append(avg(prof, 24*60*60*1000l, ra)).append("</td>");
DBHistory dbh = prof.getDBHistory();
if (dbh != null) {
time = now - dbh.getLastLookupSuccessful();
@@ -253,8 +256,8 @@ class ProfileOrganizerRenderer {
buf.append("<td align=\"right\">").append(DataHelper.formatDuration2(time)).append("</td>");
time = now - dbh.getLastStoreFailed();
buf.append("<td align=\"right\">").append(DataHelper.formatDuration2(time)).append("</td>");
buf.append("<td align=\"right\">").append(davg(dbh, 60*60*1000l)).append("</td>");
buf.append("<td align=\"right\">").append(davg(dbh, 24*60*60*1000l)).append("</td>");
buf.append("<td align=\"right\">").append(davg(dbh, 60*60*1000l, ra)).append("</td>");
buf.append("<td align=\"right\">").append(davg(dbh, 24*60*60*1000l, ra)).append("</td>");
} else {
for (int i = 0; i < 6; i++)
buf.append("<td align=\"right\">").append(_(NA));
@@ -340,31 +343,30 @@ class ProfileOrganizerRenderer {
private final static String num(double num) { synchronized (_fmt) { return _fmt.format(num); } }
private final static String NA = HelperBase._x("n/a");

private String avg (PeerProfile prof, long rate) {
private String avg (PeerProfile prof, long rate, RateAverages ra) {
RateStat rs = prof.getDbResponseTime();
if (rs == null)
return _(NA);
Rate r = rs.getRate(rate);
if (r == null)
return _(NA);
long c = r.getCurrentEventCount() + r.getLastEventCount();
if (c == 0)
r.computeAverages(ra, false);
if (ra.getTotalEventCount() == 0)
return _(NA);
double d = r.getCurrentTotalValue() + r.getLastTotalValue();
return DataHelper.formatDuration2(Math.round(d/c));
return DataHelper.formatDuration2(Math.round(ra.getAverage()));
}

private String davg (DBHistory dbh, long rate) {
private String davg (DBHistory dbh, long rate, RateAverages ra) {
RateStat rs = dbh.getFailedLookupRate();
if (rs == null)
return "0%";
Rate r = rs.getRate(rate);
if (r == null)
return "0%";
long c = r.getCurrentEventCount() + r.getLastEventCount();
if (c <= 0)
r.computeAverages(ra, false);
if (ra.getTotalEventCount() <= 0)
return "0%";
double avg = 0.5 + 100 * (r.getCurrentTotalValue() + r.getLastTotalValue()) / c;
double avg = 0.5 + 100 * ra.getAverage();
return ((int) avg) + "%";
}

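For reference, a minimal sketch (not part of this changeset) of the counting pattern the avg()/davg() rewrite above relies on: the manual current-plus-last sums are replaced by a single computeAverages() call on a caller-supplied temp object. Only the Rate and RateAverages APIs shown in this diff are assumed; the class name is illustrative.

import net.i2p.stat.Rate;
import net.i2p.stat.RateAverages;

class EventCountSketch {
    /** old style: two separate reads of the rate */
    static long countOld(Rate r) {
        return r.getCurrentEventCount() + r.getLastEventCount();
    }

    /** new style: one synchronized snapshot into a reusable temp object */
    static long countNew(Rate r, RateAverages ra) {
        return r.computeAverages(ra, false).getTotalEventCount();
    }
}
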
@@ -14,25 +14,25 @@ import net.i2p.data.DataHelper;
*/
public class Rate {
//private final static Log _log = new Log(Rate.class);
private volatile double _currentTotalValue;
private double _currentTotalValue;
// was long, save space
private volatile int _currentEventCount;
private volatile long _currentTotalEventTime;
private volatile double _lastTotalValue;
private int _currentEventCount;
private long _currentTotalEventTime;
private double _lastTotalValue;
// was long, save space
private volatile int _lastEventCount;
private volatile long _lastTotalEventTime;
private volatile double _extremeTotalValue;
private int _lastEventCount;
private long _lastTotalEventTime;
private double _extremeTotalValue;
// was long, save space
private volatile int _extremeEventCount;
private volatile long _extremeTotalEventTime;
private volatile double _lifetimeTotalValue;
private volatile long _lifetimeEventCount;
private volatile long _lifetimeTotalEventTime;
private int _extremeEventCount;
private long _extremeTotalEventTime;
private double _lifetimeTotalValue;
private long _lifetimeEventCount;
private long _lifetimeTotalEventTime;
private RateSummaryListener _summaryListener;
private RateStat _stat;

private volatile long _lastCoalesceDate;
private long _lastCoalesceDate;
private long _creationDate;
// was long, save space
private int _period;
@@ -41,37 +41,37 @@ public class Rate {
// private final Object _lock = new Object();

/** in the current (partial) period, what is the total value acrued through all events? */
public double getCurrentTotalValue() {
public synchronized double getCurrentTotalValue() {
return _currentTotalValue;
}

/** in the current (partial) period, how many events have occurred? */
public long getCurrentEventCount() {
public synchronized long getCurrentEventCount() {
return _currentEventCount;
}

/** in the current (partial) period, how much of the time has been spent doing the events? */
public long getCurrentTotalEventTime() {
public synchronized long getCurrentTotalEventTime() {
return _currentTotalEventTime;
}

/** in the last full period, what was the total value acrued through all events? */
public double getLastTotalValue() {
public synchronized double getLastTotalValue() {
return _lastTotalValue;
}

/** in the last full period, how many events occurred? */
public long getLastEventCount() {
public synchronized long getLastEventCount() {
return _lastEventCount;
}

/** in the last full period, how much of the time was spent doing the events? */
public long getLastTotalEventTime() {
public synchronized long getLastTotalEventTime() {
return _lastTotalEventTime;
}

/** what was the max total value acrued in any period? */
public double getExtremeTotalValue() {
public synchronized double getExtremeTotalValue() {
return _extremeTotalValue;
}

@@ -79,42 +79,42 @@ public class Rate {
* when the max(totalValue) was achieved, how many events occurred in that period?
* Note that this is not necesarily the highest event count; that isn't tracked.
*/
public long getExtremeEventCount() {
public synchronized long getExtremeEventCount() {
return _extremeEventCount;
}

/** when the max(totalValue) was achieved, how much of the time was spent doing the events? */
public long getExtremeTotalEventTime() {
public synchronized long getExtremeTotalEventTime() {
return _extremeTotalEventTime;
}

/** since rate creation, what was the total value acrued through all events? */
public double getLifetimeTotalValue() {
public synchronized double getLifetimeTotalValue() {
return _lifetimeTotalValue;
}

/** since rate creation, how many events have occurred? */
public long getLifetimeEventCount() {
public synchronized long getLifetimeEventCount() {
return _lifetimeEventCount;
}

/** since rate creation, how much of the time was spent doing the events? */
public long getLifetimeTotalEventTime() {
public synchronized long getLifetimeTotalEventTime() {
return _lifetimeTotalEventTime;
}

/** when was the rate last coalesced? */
public long getLastCoalesceDate() {
public synchronized long getLastCoalesceDate() {
return _lastCoalesceDate;
}

/** when was this rate created? */
public long getCreationDate() {
public synchronized long getCreationDate() {
return _creationDate;
}

/** how large should this rate's cycle be? */
public long getPeriod() {
public synchronized long getPeriod() {
return _period;
}

@@ -160,13 +160,11 @@ public class Rate {
* If you always use this call, eventDuration is always zero,
* and the various get*Saturation*() and get*EventTime() methods will return zero.
*/
public void addData(long value) {
synchronized (this) {
_currentTotalValue += value;
_currentEventCount++;
_lifetimeTotalValue += value;
_lifetimeEventCount++;
}
public synchronized void addData(long value) {
_currentTotalValue += value;
_currentEventCount++;
_lifetimeTotalValue += value;
_lifetimeEventCount++;
}

/**
@@ -202,16 +200,14 @@ public class Rate {
* @param value value to accrue in the current period
* @param eventDuration how long it took to accrue this data (set to 0 if it was instantaneous)
*/
public void addData(long value, long eventDuration) {
synchronized (this) {
_currentTotalValue += value;
_currentEventCount++;
_currentTotalEventTime += eventDuration;
public synchronized void addData(long value, long eventDuration) {
_currentTotalValue += value;
_currentEventCount++;
_currentTotalEventTime += eventDuration;

_lifetimeTotalValue += value;
_lifetimeEventCount++;
_lifetimeTotalEventTime += eventDuration;
}
_lifetimeTotalValue += value;
_lifetimeEventCount++;
_lifetimeTotalEventTime += eventDuration;
}

/** 2s is plenty of slack to deal with slow coalescing (across many stats) */
@@ -261,10 +257,8 @@ public class Rate {

/**
* What was the average value across the events in the last period?
*
* Warning - unsynchronized, might glitch during coalesce, caller may prevent by synchronizing on this.
*/
public double getAverageValue() {
public synchronized double getAverageValue() {
int lec = _lastEventCount; // avoid race NPE
if ((_lastTotalValue != 0) && (lec > 0))
return _lastTotalValue / lec;
@@ -275,10 +269,8 @@ public class Rate {
/**
* During the extreme period (i.e. the period with the highest total value),
* what was the average value?
*
* Warning - unsynchronized, might glitch during coalesce, caller may prevent by synchronizing on this.
*/
public double getExtremeAverageValue() {
public synchronized double getExtremeAverageValue() {
if ((_extremeTotalValue != 0) && (_extremeEventCount > 0))
return _extremeTotalValue / _extremeEventCount;

@@ -287,23 +279,31 @@ public class Rate {

/**
* What was the average value across the events since the stat was created?
*
* Warning - unsynchronized, might glitch during coalesce, caller may prevent by synchronizing on this.
*/
public double getLifetimeAverageValue() {
public synchronized double getLifetimeAverageValue() {
if ((_lifetimeTotalValue != 0) && (_lifetimeEventCount > 0))
return _lifetimeTotalValue / _lifetimeEventCount;

return 0.0D;
}

/**
* @return the average or lifetime average depending on last event count
* @since 0.9.4
*/
public synchronized double getAvgOrLifetimeAvg() {
if (getLastEventCount() > 0)
return getAverageValue();
return getLifetimeAverageValue();
}

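A small illustration (not from the patch) of how the new getAvgOrLifetimeAvg() helper above behaves; it uses only the Rate API shown in this file, and the one-second period and sample values are made up.

import net.i2p.stat.Rate;

class AvgFallbackSketch {
    static double sample() {
        Rate r = new Rate(1000);   // 1-second period, as in the commented-out main() below
        r.addData(40);
        r.addData(60);
        // before coalesce() the last period has no events, so the helper
        // falls back to the lifetime average (50 here); after coalesce()
        // it returns the last period's average instead
        return r.getAvgOrLifetimeAvg();
    }
}
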
/**
* During the last period, how much of the time was spent actually processing events in proportion
* to how many events could have occurred if there were no intervals?
*
* @return ratio, or 0 if event times aren't used
*/
public double getLastEventSaturation() {
public synchronized double getLastEventSaturation() {
if ((_lastEventCount > 0) && (_lastTotalEventTime > 0)) {
/*double eventTime = (double) _lastTotalEventTime / (double) _lastEventCount;
double maxEvents = _period / eventTime;
@@ -321,11 +321,9 @@ public class Rate {
* how much of the time was spent actually processing events
* in proportion to how many events could have occurred if there were no intervals?
*
* Warning - unsynchronized, might glitch during coalesce, caller may prevent by synchronizing on this.
*
* @return ratio, or 0 if the statistic doesn't use event times
*/
public double getExtremeEventSaturation() {
public synchronized double getExtremeEventSaturation() {
if ((_extremeEventCount > 0) && (_extremeTotalEventTime > 0)) {
double eventTime = (double) _extremeTotalEventTime / (double) _extremeEventCount;
double maxEvents = _period / eventTime;
@@ -338,11 +336,9 @@ public class Rate {
* During the lifetime of this stat, how much of the time was spent actually processing events in proportion
* to how many events could have occurred if there were no intervals?
*
* Warning - unsynchronized, might glitch during coalesce, caller may prevent by synchronizing on this.
*
* @return ratio, or 0 if event times aren't used
*/
public double getLifetimeEventSaturation() {
public synchronized double getLifetimeEventSaturation() {
if ((_lastEventCount > 0) && (_lifetimeTotalEventTime > 0)) {
double eventTime = (double) _lifetimeTotalEventTime / (double) _lifetimeEventCount;
double maxEvents = _period / eventTime;
@@ -354,7 +350,7 @@ public class Rate {
}

/** how many periods have we already completed? */
public long getLifetimePeriods() {
public synchronized long getLifetimePeriods() {
long lifetime = now() - _creationDate;
double periods = lifetime / (double) _period;
return (long) Math.floor(periods);
@@ -364,11 +360,9 @@ public class Rate {
* using the last period's rate, what is the total value that could have been sent
* if events were constant?
*
* Warning - unsynchronized, might glitch during coalesce, caller may prevent by synchronizing on this.
*
* @return max total value, or 0 if event times aren't used
*/
public double getLastSaturationLimit() {
public synchronized double getLastSaturationLimit() {
if ((_lastTotalValue != 0) && (_lastEventCount > 0) && (_lastTotalEventTime > 0)) {
double saturation = getLastEventSaturation();
if (saturation != 0.0D) return _lastTotalValue / saturation;
@@ -383,11 +377,9 @@ public class Rate {
* what is the total value that could have been
* sent if events were constant?
*
* Warning - unsynchronized, might glitch during coalesce, caller may prevent by synchronizing on this.
*
* @return event total at saturation, or 0 if no event times are measured
*/
public double getExtremeSaturationLimit() {
public synchronized double getExtremeSaturationLimit() {
if ((_extremeTotalValue != 0) && (_extremeEventCount > 0) && (_extremeTotalEventTime > 0)) {
double saturation = getExtremeEventSaturation();
if (saturation != 0.0d) return _extremeTotalValue / saturation;
@@ -402,10 +394,8 @@ public class Rate {
* What was the total value, compared to the total value in
* the extreme period (i.e. the period with the highest total value),
* Warning- returns ratio, not percentage (i.e. it is not multiplied by 100 here)
*
* Warning - unsynchronized, might glitch during coalesce, caller may prevent by synchronizing on this.
*/
public double getPercentageOfExtremeValue() {
public synchronized double getPercentageOfExtremeValue() {
if ((_lastTotalValue != 0) && (_extremeTotalValue != 0))
return _lastTotalValue / _extremeTotalValue;

@@ -415,10 +405,8 @@ public class Rate {
/**
* How large was the last period's value as compared to the lifetime average value?
* Warning- returns ratio, not percentage (i.e. it is not multiplied by 100 here)
*
* Warning - unsynchronized, might glitch during coalesce, caller may prevent by synchronizing on this.
*/
public double getPercentageOfLifetimeValue() {
public synchronized double getPercentageOfLifetimeValue() {
if ((_lastTotalValue != 0) && (_lifetimeTotalValue != 0)) {
double lifetimePeriodValue = _period * (_lifetimeTotalValue / (now() - _creationDate));
return _lastTotalValue / lifetimePeriodValue;
@@ -427,7 +415,44 @@ public class Rate {
return 0.0D;
}

public void store(String prefix, StringBuilder buf) throws IOException {
/**
* @return a thread-local temp object containing computed averages.
* @since 0.9.4
*/
public RateAverages computeAverages() {
return computeAverages(RateAverages.getTemp(),false);
}

/**
* @param out where to store the computed averages.
* @param useLifetime whether the lifetime average should be used if
* there are no events.
* @return the same RateAverages object for chaining
* @since 0.9.4
*/
public synchronized RateAverages computeAverages(RateAverages out, boolean useLifetime) {
out.reset();

final long total = _currentEventCount + _lastEventCount;
out.setTotalEventCount(total);

if (total <= 0) {
final double avg = useLifetime ? getLifetimeAverageValue() : getAverageValue();
out.setAverage(avg);
} else {

if (_currentEventCount > 0)
out.setCurrent( getCurrentTotalValue() / _currentEventCount );
if (_lastEventCount > 0)
out.setLast( getLastTotalValue() / _lastEventCount );

out.setTotalValues(getCurrentTotalValue() + getLastTotalValue());
out.setAverage( out.getTotalValues() / total );
}
return out;
}

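A brief caller-side sketch (not part of the commit) of the new computeAverages() method above; the period and sample values are illustrative.

import net.i2p.stat.Rate;
import net.i2p.stat.RateAverages;

class ComputeAveragesSketch {
    static void demo() {
        Rate r = new Rate(1000);                  // 1-second period
        r.addData(10);
        r.addData(30);                            // current period: 2 events, total value 40
        RateAverages ra = r.computeAverages(RateAverages.getTemp(), false);
        long events  = ra.getTotalEventCount();   // 2 (current + last)
        double avg   = ra.getAverage();           // 40 / 2 = 20, weighted across both periods
        double total = ra.getTotalValues();       // 40
    }
}
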
public synchronized void store(String prefix, StringBuilder buf) throws IOException {
PersistenceHelper.addTime(buf, prefix, ".period", "Length of the period:", _period);
PersistenceHelper.addDate(buf, prefix, ".creationDate",
"When was this rate created?", _creationDate);
@@ -476,7 +501,7 @@ public class Rate {
* treat the data with as much freshness (or staleness) as appropriate.
* @throws IllegalArgumentException if the data was formatted incorrectly
*/
public void load(Properties props, String prefix, boolean treatAsCurrent) throws IllegalArgumentException {
public synchronized void load(Properties props, String prefix, boolean treatAsCurrent) throws IllegalArgumentException {
_period = PersistenceHelper.getInt(props, prefix, ".period");
_creationDate = PersistenceHelper.getLong(props, prefix, ".creationDate");
_lastCoalesceDate = PersistenceHelper.getLong(props, prefix, ".lastCoalesceDate");
@@ -504,7 +529,7 @@ public class Rate {
* We base it on the stat we are tracking, not the stored data.
*/
@Override
public boolean equals(Object obj) {
public synchronized boolean equals(Object obj) {
if ((obj == null) || !(obj instanceof Rate)) return false;
if (obj == this) return true;
Rate r = (Rate) obj;
@@ -519,12 +544,12 @@ public class Rate {
* (RateStat stores in an array) so let's make this easy.
*/
@Override
public int hashCode() {
public synchronized int hashCode() {
return DataHelper.hashCode(_stat) ^ _period ^ ((int) _creationDate);
}

@Override
public String toString() {
public synchronized String toString() {
StringBuilder buf = new StringBuilder(2048);
buf.append("\n\t total value: ").append(getLastTotalValue());
buf.append("\n\t highest total value: ").append(getExtremeTotalValue());
@@ -554,39 +579,4 @@ public class Rate {
// skew periodically
return System.currentTimeMillis(); //Clock.getInstance().now();
}

/******
public static void main(String args[]) {
Rate rate = new Rate(1000);
for (int i = 0; i < 50; i++) {
try {
Thread.sleep(20);
} catch (InterruptedException ie) { // nop
}
rate.addData(i * 100, 20);
}
rate.coalesce();
StringBuilder buf = new StringBuilder(1024);
try {
rate.store("rate.test", buf);
byte data[] = buf.toString().getBytes();
_log.error("Stored rate: size = " + data.length + "\n" + buf.toString());

Properties props = new Properties();
props.load(new java.io.ByteArrayInputStream(data));

//_log.error("Properties loaded: \n" + props);

Rate r = new Rate(props, "rate.test", true);

_log.error("Comparison after store/load: " + r.equals(rate));
} catch (Throwable t) {
_log.error("b0rk", t);
}
try {
Thread.sleep(5000);
} catch (InterruptedException ie) { // nop
}
}
******/
}

core/java/src/net/i2p/stat/RateAverages.java (new file)
@@ -0,0 +1,102 @@
package net.i2p.stat;

/**
* Storage space for computations of various averages.
*
* @author zab
* @since 0.9.4
*/
public class RateAverages {

/** thread-local temp instance */
private static final ThreadLocal<RateAverages> TEMP =
new ThreadLocal<RateAverages>() {
public RateAverages initialValue() {
return new RateAverages();
}
};

/**
* @since 0.9.4
* @return thread-local temp instance.
*/
public static RateAverages getTemp() {
return TEMP.get();
}

private double average, current, last, totalValues;
private long totalEventCount;

void reset() {
average = 0;
current = 0;
last = 0;
totalEventCount = 0;
totalValues = 0;
}

/**
* @since 0.9.4
* @return one of several things:
* if there are any events (current or last) => weighted average
* otherwise if the useLifetime parameter to Rate.computeAverages was:
* true => the lifetime average value
* false => zero
*/
public double getAverage() {
return average;
}

void setAverage(double average) {
this.average = average;
}

/**
* @since 0.9.4
* @return the current average == current value / current event count
*/
public double getCurrent() {
return current;
}

void setCurrent(double current) {
this.current = current;
}

/**
* @since 0.9.4
* @return the last average == last value / last event count
*/
public double getLast() {
return last;
}

void setLast(double last) {
this.last = last;
}

/**
* @since 0.9.4
* @return the total event count == current + last event counts
*/
public long getTotalEventCount() {
return totalEventCount;
}

void setTotalEventCount(long totalEventCount) {
this.totalEventCount = totalEventCount;
}

/**
* @since 0.9.4
* @return the total values == current + last values
*/
public double getTotalValues() {
return totalValues;
}

void setTotalValues(double totalValues) {
this.totalValues = totalValues;
}

}

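A usage note on the getTemp() helper above (an observation, not text from the commit): the returned object is a per-thread scratch instance, and Rate.computeAverages() resets it on every call, so its values should be consumed immediately and the instance should never be cached or shared between threads. A minimal sketch:

import net.i2p.stat.Rate;
import net.i2p.stat.RateAverages;

class TempUseSketch {
    static long snapshot(Rate r) {
        RateAverages ra = RateAverages.getTemp();   // same instance for this thread every time
        r.computeAverages(ra, false);               // reset() wipes any earlier values
        return ra.getTotalEventCount();             // read before the temp is reused
    }
}
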
@@ -3,9 +3,9 @@ package net.i2p.router;
import net.i2p.data.Hash;
import net.i2p.router.peermanager.TunnelHistory;
import net.i2p.stat.Rate;
import net.i2p.stat.RateAverages;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;

/**
@@ -119,6 +119,8 @@ class RouterThrottleImpl implements RouterThrottle {
//long lag = _context.jobQueue().getMaxLag();
// reject here if lag too high???

RateAverages ra = RateAverages.getTemp();

// TODO
// This stat is highly dependent on transport mix.
// For NTCP, it is queueing delay only, ~25ms
@@ -133,37 +135,19 @@ class RouterThrottleImpl implements RouterThrottle {

//Reject tunnels if the time to process messages and send them is too large. Too much time implies congestion.
if(r != null) {
long current = r.getCurrentEventCount();
long last = r.getLastEventCount();
long total = current + last;
double avgSendProcessingTime = 0;
double currentSendProcessingTime = 0;
double lastSendProcessingTime = 0;

//Calculate times
if(total > 0) {
if(current > 0)
currentSendProcessingTime = r.getCurrentTotalValue() / current;
if(last > 0)
lastSendProcessingTime = r.getLastTotalValue() / last;
avgSendProcessingTime = (r.getCurrentTotalValue() + r.getLastTotalValue()) / total;
} else {
avgSendProcessingTime = r.getAverageValue();
//if(_log.shouldLog(Log.WARN))
// _log.warn("No events occurred. Using 1 minute average to look at message delay.");
}
r.computeAverages(ra,false);

int maxProcessingTime = _context.getProperty(PROP_MAX_PROCESSINGTIME, DEFAULT_MAX_PROCESSINGTIME);

//Set throttling if necessary
if((avgSendProcessingTime > maxProcessingTime*0.9
|| currentSendProcessingTime > maxProcessingTime
|| lastSendProcessingTime > maxProcessingTime)) {
if((ra.getAverage() > maxProcessingTime*0.9
|| ra.getCurrent() > maxProcessingTime
|| ra.getLast() > maxProcessingTime)) {
if(_log.shouldLog(Log.WARN)) {
_log.warn("Refusing tunnel request due to sendProcessingTime " +
((int)currentSendProcessingTime) + " / " +
((int)lastSendProcessingTime) + " / " +
((int)avgSendProcessingTime) + " / " +
((int)ra.getCurrent()) + " / " +
((int)ra.getLast()) + " / " +
((int)ra.getAverage()) + " / " +
maxProcessingTime +
" current/last/avg/max ms");
}
@@ -181,11 +165,9 @@ class RouterThrottleImpl implements RouterThrottle {
double tunnelGrowthFactor = getTunnelGrowthFactor();
Rate avgTunnels = _context.statManager().getRate("tunnel.participatingTunnels").getRate(10*60*1000);
if (avgTunnels != null) {
double avg = 0;
if (avgTunnels.getLastEventCount() > 0)
avg = avgTunnels.getAverageValue();
else
avg = avgTunnels.getLifetimeAverageValue();

double avg = avgTunnels.getAvgOrLifetimeAvg();

int min = getMinThrottleTunnels();
if (avg < min)
avg = min;
@@ -222,11 +204,7 @@ class RouterThrottleImpl implements RouterThrottle {
Rate tunnelTestTime10m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(10*60*1000);
if ( (tunnelTestTime1m != null) && (tunnelTestTime10m != null) && (tunnelTestTime1m.getLastEventCount() > 0) ) {
double avg1m = tunnelTestTime1m.getAverageValue();
double avg10m = 0;
if (tunnelTestTime10m.getLastEventCount() > 0)
avg10m = tunnelTestTime10m.getAverageValue();
else
avg10m = tunnelTestTime10m.getLifetimeAverageValue();
double avg10m = tunnelTestTime10m.getAvgOrLifetimeAvg();

if (avg10m < 5000)
avg10m = 5000; // minimum before complaining
@@ -272,13 +250,8 @@ class RouterThrottleImpl implements RouterThrottle {
double messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
if (rs != null) {
r = rs.getRate(60*1000);
if (r != null) {
long count = r.getLastEventCount() + r.getCurrentEventCount();
if (count > 0)
messagesPerTunnel = (r.getLastTotalValue() + r.getCurrentTotalValue()) / count;
else
messagesPerTunnel = r.getLifetimeAverageValue();
}
if (r != null)
messagesPerTunnel = r.computeAverages(ra, true).getAverage();
}
if (messagesPerTunnel < DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE)
messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;

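The messagesPerTunnel change above leans on the useLifetime flag of computeAverages(); a short sketch of the difference (the method name is illustrative, the APIs are the ones shown in this diff):

import net.i2p.stat.Rate;
import net.i2p.stat.RateAverages;

class LifetimeFallbackSketch {
    static double messagesPerTunnel(Rate r, RateAverages ra) {
        // useLifetime == true: with no events in the current or last period,
        // getAverage() returns the lifetime average (the removed else-branch above);
        // with useLifetime == false it would return 0 instead.
        return r.computeAverages(ra, true).getAverage();
    }
}
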
@@ -2,6 +2,7 @@ package net.i2p.router.peermanager;

import net.i2p.I2PAppContext;
import net.i2p.stat.Rate;
import net.i2p.stat.RateAverages;
import net.i2p.stat.RateStat;

/**
@@ -122,15 +123,16 @@ class CapacityCalculator {
Rate curAccepted = acceptStat.getRate(period);
Rate curRejected = rejectStat.getRate(period);
Rate curFailed = failedStat.getRate(period);
RateAverages ra = RateAverages.getTemp();

double eventCount = 0;
if (curAccepted != null) {
eventCount = curAccepted.getCurrentEventCount() + curAccepted.getLastEventCount();
eventCount = curAccepted.computeAverages(ra, false).getTotalEventCount();
// Punish for rejections.
// We don't want to simply do eventCount -= rejected or we get to zero with 50% rejection,
// and we don't want everybody to be at zero during times of congestion.
if (eventCount > 0 && curRejected != null) {
long rejected = curRejected.getCurrentEventCount() + curRejected.getLastEventCount();
long rejected = curRejected.computeAverages(ra,false).getTotalEventCount();
if (rejected > 0)
eventCount *= eventCount / (eventCount + (2 * rejected));
}
@@ -144,7 +146,7 @@ class CapacityCalculator {
// fast pool, for example, you have a 1/7 chance of being falsely blamed.
// We also don't want to drive everybody's capacity to zero, that isn't helpful.
if (curFailed != null) {
double failed = curFailed.getCurrentTotalValue() + curFailed.getLastTotalValue();
double failed = curFailed.computeAverages(ra, false).getTotalValues();
if (failed > 0) {
//if ( (period <= 10*60*1000) && (curFailed.getCurrentEventCount() > 0) )
// return 0.0d; // their tunnels have failed in the last 0-10 minutes

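A worked example (not from the source) of the rejection penalty retained above, showing why the multiplicative form is used instead of plain subtraction:

class RejectionPenaltySketch {
    static void demo() {
        double eventCount = 10;   // accepted tunnel requests in the period
        long rejected = 10;       // i.e. a 50% rejection rate

        double subtracted = eventCount - rejected;                                // 0 -- too harsh
        double damped = eventCount * (eventCount / (eventCount + 2 * rejected));  // 10 * 10/30 = 3.33
    }
}
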
@@ -178,6 +178,6 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
Rate r = rs.getRate(period);
if (r == null)
return 0;
return (int) (r.getLastEventCount() + r.getCurrentEventCount());
return (int) (r.computeAverages().getTotalEventCount());
}
}

@@ -19,6 +19,7 @@ import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelPoolSettings;
import net.i2p.router.tunnel.HopConfig;
import net.i2p.stat.Rate;
import net.i2p.stat.RateAverages;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;

@@ -331,9 +332,10 @@ public class TunnelPool {
Rate rr = r.getRate(10*60*1000);
Rate sr = s.getRate(10*60*1000);
if (er != null && rr != null && sr != null) {
long ec = er.getCurrentEventCount() + er.getLastEventCount();
long rc = rr.getCurrentEventCount() + rr.getLastEventCount();
long sc = sr.getCurrentEventCount() + sr.getLastEventCount();
RateAverages ra = RateAverages.getTemp();
long ec = er.computeAverages(ra, false).getTotalEventCount();
long rc = rr.computeAverages(ra, false).getTotalEventCount();
long sc = sr.computeAverages(ra, false).getTotalEventCount();
long tot = ec + rc + sc;
if (tot >= BUILD_TRIES_QUANTITY_OVERRIDE) {
if (1000 * sc / tot <= 1000 / BUILD_TRIES_QUANTITY_OVERRIDE)
@@ -366,9 +368,10 @@ public class TunnelPool {
Rate rr = r.getRate(10*60*1000);
Rate sr = s.getRate(10*60*1000);
if (er != null && rr != null && sr != null) {
long ec = er.getCurrentEventCount() + er.getLastEventCount();
long rc = rr.getCurrentEventCount() + rr.getLastEventCount();
long sc = sr.getCurrentEventCount() + sr.getLastEventCount();
RateAverages ra = RateAverages.getTemp();
long ec = er.computeAverages(ra, false).getTotalEventCount();
long rc = rr.computeAverages(ra, false).getTotalEventCount();
long sc = sr.computeAverages(ra, false).getTotalEventCount();
long tot = ec + rc + sc;
if (tot >= BUILD_TRIES_LENGTH_OVERRIDE) {
if (1000 * sc / tot <= 1000 / BUILD_TRIES_LENGTH_OVERRIDE)

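One detail worth noting about the two hunks above (an observation, not part of the diff): the same thread-local temp is passed to three consecutive computeAverages() calls, which is safe only because each total is read out before the next call resets the temp. A minimal sketch of that pattern:

import net.i2p.stat.Rate;
import net.i2p.stat.RateAverages;

class SequentialTempSketch {
    static long totalEvents(Rate expire, Rate reject, Rate success) {
        RateAverages ra = RateAverages.getTemp();
        long ec = expire.computeAverages(ra, false).getTotalEventCount();   // read now...
        long rc = reject.computeAverages(ra, false).getTotalEventCount();   // ...before ra is reset again
        long sc = success.computeAverages(ra, false).getTotalEventCount();
        return ec + rc + sc;
    }
}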