* Profiles:
  - Remove the almost-unused send and receive size RateStats; this also reduces the effective time for isActive()
  - Only store DB history on-demand to save space; fix up resulting NPEs
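The pattern behind the NPE fixes, repeated across the callers in this diff, is that the DB-related RateStats and the DBHistory are now created lazily: write paths call expandDBProfile() before recording data, and read paths bail out when getIsExpandedDB() is false. A minimal caller-side sketch of that convention, using only the accessors shown in the hunks below (the wrapper class and method names here are illustrative, not part of the commit):

    // Sketch only; PeerProfile package assumed from the router's peermanager source layout.
    import net.i2p.router.peermanager.PeerProfile;

    class DbStatsUsageSketch {
        /** Write path: create the DB stats on demand before recording into them. */
        static void recordLookupSuccess(PeerProfile data, long responseTimeMs) {
            if (!data.getIsExpandedDB())
                data.expandDBProfile();   // lazily allocates _dbResponseTime, _dbIntroduction, _dbHistory
            data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
            data.getDBHistory().lookupSuccessful();
        }

        /** Read path: skip peers whose DB stats were never created instead of dereferencing null. */
        static long failedLookupsLastHour(PeerProfile prof) {
            if (prof == null || !prof.getIsExpandedDB())
                return 0;
            return prof.getDBHistory().getFailedLookupRate()
                       .getRate(60*60*1000).getCurrentEventCount();
        }
    }

Because expandDBProfile() is only triggered when a db lookup result is recorded (dbLookupSuccessful / dbLookupFailed), and on disk load only when the stored dbHistory counters show more than a lookup or two, profiles for peers that are not floodfills stay small.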
@@ -105,6 +105,9 @@ class ProfileOrganizerRenderer {

buf.append("<tr><td align=\"center\" nowrap>");
buf.append(_context.commSystem().renderPeerHTML(peer));
// debug
//if(prof.getIsExpandedDB())
// buf.append(" ** ");
buf.append("</td><td align=\"center\">");

switch (tier) {

@@ -156,7 +159,8 @@ class ProfileOrganizerRenderer {
buf.append(' ').append(fails).append('/').append(total).append(" ").append(_("Test Fails"));
}
buf.append(" </td>");
buf.append("<td nowrap align=\"center\"><a target=\"_blank\" href=\"dumpprofile.jsp?peer=").append(peer.toBase64().substring(0,6)).append("\">profile</a>");
buf.append("<td nowrap align=\"center\"><a target=\"_blank\" href=\"dumpprofile.jsp?peer=")
.append(peer.toBase64().substring(0,6)).append("\">").append(_("profile")).append("</a>");
buf.append(" <a href=\"configpeer.jsp?peer=").append(peer.toBase64()).append("\">+-</a></td>\n");
buf.append("</tr>");
// let's not build the whole page in memory (~500 bytes per peer)

@@ -223,6 +227,8 @@ class ProfileOrganizerRenderer {
buf.append("<td align=\"right\">").append(dbh.getUnpromptedDbStoreOld()).append("</td>");
buf.append("<td align=\"right\">").append(davg(dbh, 60*60*1000l)).append("</td>");
buf.append("<td align=\"right\">").append(davg(dbh, 24*60*60*1000l)).append("</td>");
} else {
buf.append("<td align=\"right\">n/a<td align=\"right\">n/a<td align=\"right\">n/a<td align=\"right\">n/a<td align=\"right\">n/a<td align=\"right\">n/a");
}
}
buf.append("</table>");
@@ -891,12 +891,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public int getPeerTimeout(Hash peer) {
PeerProfile prof = _context.profileOrganizer().getProfile(peer);
double responseTime = MAX_PER_PEER_TIMEOUT;
if (prof != null)
if (prof != null && prof.getIsExpandedDB()) {
responseTime = prof.getDbResponseTime().getLifetimeAverageValue();
if (responseTime < MIN_PER_PEER_TIMEOUT)
responseTime = MIN_PER_PEER_TIMEOUT;
else if (responseTime > MAX_PER_PEER_TIMEOUT)
responseTime = MAX_PER_PEER_TIMEOUT;
if (responseTime < MIN_PER_PEER_TIMEOUT)
responseTime = MIN_PER_PEER_TIMEOUT;
else if (responseTime > MAX_PER_PEER_TIMEOUT)
responseTime = MAX_PER_PEER_TIMEOUT;
}
return 4 * (int)responseTime; // give it up to 4x the average response time
}
@@ -174,11 +174,13 @@ class StoreJob extends JobImpl {
_state.addSkipped(peer);
} else {
int peerTimeout = _facade.getPeerTimeout(peer);
PeerProfile prof = getContext().profileOrganizer().getProfile(peer);
if (prof != null) {
RateStat failing = prof.getDBHistory().getFailedLookupRate();
Rate failed = failing.getRate(60*60*1000);
}

//PeerProfile prof = getContext().profileOrganizer().getProfile(peer);
//if (prof != null && prof.getIsExpandedDB()) {
// RateStat failing = prof.getDBHistory().getFailedLookupRate();
// Rate failed = failing.getRate(60*60*1000);
//}

//long failedCount = failed.getCurrentEventCount()+failed.getLastEventCount();
//if (failedCount > 10) {
// _state.addSkipped(peer);
@@ -19,12 +19,15 @@ public class IntegrationCalculator extends Calculator {

@Override
public double calc(PeerProfile profile) {
// give more weight to recent counts
long val = profile.getDbIntroduction().getRate(24*60*60*1000l).getCurrentEventCount();
val += 2 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getLastEventCount();
val += 3 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getCurrentEventCount();
val += 4 * 24 * profile.getDbIntroduction().getRate(60*60*1000l).getCurrentEventCount();
val /= 10;
long val = 0;
if (profile.getIsExpandedDB()) {
// give more weight to recent counts
val = profile.getDbIntroduction().getRate(24*60*60*1000l).getCurrentEventCount();
val += 2 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getLastEventCount();
val += 3 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getCurrentEventCount();
val += 4 * 24 * profile.getDbIntroduction().getRate(60*60*1000l).getCurrentEventCount();
val /= 10;
}
val += profile.getIntegrationBonus();
return val;
}
@@ -36,8 +36,8 @@ public class PeerProfile {
private long _lastHeardFrom;
private double _tunnelTestResponseTimeAvg;
// periodic rates
private RateStat _sendSuccessSize = null;
private RateStat _receiveSize = null;
//private RateStat _sendSuccessSize = null;
//private RateStat _receiveSize = null;
private RateStat _dbResponseTime = null;
private RateStat _tunnelCreateResponseTime = null;
private RateStat _tunnelTestResponseTime = null;

@@ -56,6 +56,7 @@ public class PeerProfile {
private DBHistory _dbHistory;
// does this peer profile contain expanded data, or just the basics?
private boolean _expanded;
private boolean _expandedDB;
private int _consecutiveShitlists;

public PeerProfile(RouterContext context, Hash peer) {

@@ -72,6 +73,8 @@ public class PeerProfile {
_consecutiveShitlists = 0;
_tunnelTestResponseTimeAvg = 0.0d;
_peer = peer;
// this is always true, and there are several places in the router that will NPE
// if it is false, so all need to be fixed before we can have non-expanded profiles
if (expand)
expandProfile();
}

@@ -87,6 +90,7 @@ public class PeerProfile {
*
*/
public boolean getIsExpanded() { return _expanded; }
public boolean getIsExpandedDB() { return _expandedDB; }

public int incrementShitlists() { return _consecutiveShitlists++; }
public void unshitlist() { _consecutiveShitlists = 0; }
@@ -107,18 +111,25 @@ public class PeerProfile {
*
* Note: this appears to be the only use for these two RateStats.
*
* Update: Rewritten so we can get rid of the two RateStats.
* This also helps by not having it depend on coalesce boundaries.
*
* @param period must be one of the periods in the RateStat constructors below
* (5*60*1000 or 60*60*1000)
*/
public boolean getIsActive(long period) {
if ( (getSendSuccessSize().getRate(period).getCurrentEventCount() > 0) ||
(getSendSuccessSize().getRate(period).getLastEventCount() > 0) ||
(getReceiveSize().getRate(period).getCurrentEventCount() > 0) ||
(getReceiveSize().getRate(period).getLastEventCount() > 0) ||
_context.commSystem().isEstablished(_peer) )
return true;
else
return false;
//if ( (getSendSuccessSize().getRate(period).getCurrentEventCount() > 0) ||
// (getSendSuccessSize().getRate(period).getLastEventCount() > 0) ||
// (getReceiveSize().getRate(period).getCurrentEventCount() > 0) ||
// (getReceiveSize().getRate(period).getLastEventCount() > 0) ||
// _context.commSystem().isEstablished(_peer) )
// return true;
//else
// return false;
long before = _context.clock().now() - period;
return getLastHeardFrom() < before ||
getLastSendSuccessful() < before ||
_context.commSystem().isEstablished(_peer);
}
@@ -142,25 +153,31 @@ public class PeerProfile {
public long getLastHeardFrom() { return _lastHeardFrom; }
public void setLastHeardFrom(long when) { _lastHeardFrom = when; }

/** history of tunnel activity with the peer */
/** history of tunnel activity with the peer
Warning - may return null if !getIsExpanded() */
public TunnelHistory getTunnelHistory() { return _tunnelHistory; }
public void setTunnelHistory(TunnelHistory history) { _tunnelHistory = history; }

/** history of db activity with the peer */
/** history of db activity with the peer
Warning - may return null if !getIsExpandedDB() */
public DBHistory getDBHistory() { return _dbHistory; }
public void setDBHistory(DBHistory hist) { _dbHistory = hist; }

/** how large successfully sent messages are, calculated over a 1 minute, 1 hour, and 1 day period */
public RateStat getSendSuccessSize() { return _sendSuccessSize; }
//public RateStat getSendSuccessSize() { return _sendSuccessSize; }
/** how large received messages are, calculated over a 1 minute, 1 hour, and 1 day period */
public RateStat getReceiveSize() { return _receiveSize; }
/** how long it takes to get a db response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period */
//public RateStat getReceiveSize() { return _receiveSize; }
/** how long it takes to get a db response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period
Warning - may return null if !getIsExpandedDB() */
public RateStat getDbResponseTime() { return _dbResponseTime; }
/** how long it takes to get a tunnel create response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period */
/** how long it takes to get a tunnel create response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period
Warning - may return null if !getIsExpanded() */
public RateStat getTunnelCreateResponseTime() { return _tunnelCreateResponseTime; }
/** how long it takes to successfully test a tunnel this peer participates in (in milliseconds), calculated over a 10 minute, 1 hour, and 1 day period */
/** how long it takes to successfully test a tunnel this peer participates in (in milliseconds), calculated over a 10 minute, 1 hour, and 1 day period
Warning - may return null if !getIsExpanded() */
public RateStat getTunnelTestResponseTime() { return _tunnelTestResponseTime; }
/** how many new peers we get from dbSearchReplyMessages or dbStore messages, calculated over a 1 hour, 1 day, and 1 week period */
/** how many new peers we get from dbSearchReplyMessages or dbStore messages, calculated over a 1 hour, 1 day, and 1 week period
Warning - may return null if !getIsExpandedDB() */
public RateStat getDbIntroduction() { return _dbIntroduction; }

/**
@@ -327,10 +344,12 @@ public class PeerProfile {
* extensive stats on them, call this to discard excess data points. Specifically,
* this drops the rates, the tunnelHistory, and the dbHistory.
*
* UNUSED for now, will cause NPEs elsewhere
*/
/*****
public void shrinkProfile() {
_sendSuccessSize = null;
_receiveSize = null;
//_sendSuccessSize = null;
//_receiveSize = null;
_dbResponseTime = null;
_tunnelCreateResponseTime = null;
_tunnelTestResponseTime = null;

@@ -339,7 +358,9 @@ public class PeerProfile {
_dbHistory = null;

_expanded = false;
_expandedDB = false;
}
******/

/**
* When the given peer is performing well enough that we want to keep detailed
@@ -350,32 +371,43 @@ public class PeerProfile {
*/
public void expandProfile() {
String group = (null == _peer ? "profileUnknown" : _peer.toBase64().substring(0,6));
if (_sendSuccessSize == null)
_sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 5*60*1000l, 60*60*1000l });
if (_receiveSize == null)
_receiveSize = new RateStat("receiveSize", "How large received messages are", group, new long[] { 5*60*1000l, 60*60*1000l } );
if (_dbResponseTime == null)
_dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
//if (_sendSuccessSize == null)
// _sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 5*60*1000l, 60*60*1000l });
//if (_receiveSize == null)
// _receiveSize = new RateStat("receiveSize", "How large received messages are", group, new long[] { 5*60*1000l, 60*60*1000l } );
if (_tunnelCreateResponseTime == null)
_tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_tunnelTestResponseTime == null)
_tunnelTestResponseTime = new RateStat("tunnelTestResponseTime", "how long it takes to successfully test a tunnel this peer participates in (in milliseconds)", group, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 } );
if (_dbIntroduction == null)
_dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 6*60*60*1000l, 24*60*60*1000l });

if (_tunnelHistory == null)
_tunnelHistory = new TunnelHistory(_context, group);

//_sendSuccessSize.setStatLog(_context.statManager().getStatLog());
//_receiveSize.setStatLog(_context.statManager().getStatLog());
_tunnelCreateResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelTestResponseTime.setStatLog(_context.statManager().getStatLog());
_expanded = true;
}

/**
* For floodfills
*/
public synchronized void expandDBProfile() {
String group = (null == _peer ? "profileUnknown" : _peer.toBase64().substring(0,6));
if (_dbResponseTime == null)
_dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_dbIntroduction == null)
_dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 6*60*60*1000l, 24*60*60*1000l });

if (_dbHistory == null)
_dbHistory = new DBHistory(_context, group);

_sendSuccessSize.setStatLog(_context.statManager().getStatLog());
_receiveSize.setStatLog(_context.statManager().getStatLog());
_dbResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelCreateResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelTestResponseTime.setStatLog(_context.statManager().getStatLog());
_dbIntroduction.setStatLog(_context.statManager().getStatLog());
_expanded = true;
_expandedDB = true;
}

/** once a day, on average, cut the measured throughtput values in half */
/** let's try once an hour times 3/4 */
private static final int DROP_PERIOD_MINUTES = 60;
@@ -419,14 +451,16 @@ public class PeerProfile {
/** update the stats and rates (this should be called once a minute) */
public void coalesceStats() {
if (!_expanded) return;
_dbIntroduction.coalesceStats();
_dbResponseTime.coalesceStats();
_receiveSize.coalesceStats();
_sendSuccessSize.coalesceStats();
//_receiveSize.coalesceStats();
//_sendSuccessSize.coalesceStats();
_tunnelCreateResponseTime.coalesceStats();
_tunnelTestResponseTime.coalesceStats();
_dbHistory.coalesceStats();
_tunnelHistory.coalesceStats();
if (_expandedDB) {
_dbIntroduction.coalesceStats();
_dbResponseTime.coalesceStats();
_dbHistory.coalesceStats();
}

coalesceThroughput();
@@ -39,7 +39,7 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastSendSuccessful(_context.clock().now());
data.getSendSuccessSize().addData(bytesSent, msToSend);
//data.getSendSuccessSize().addData(bytesSent, msToSend);
}

/**

@@ -169,11 +169,14 @@ public class ProfileManagerImpl implements ProfileManager {
/**
* Note that the peer was able to return the valid data for a db lookup
*
* This will force creation of DB stats
*/
public void dbLookupSuccessful(Hash peer, long responseTimeMs) {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastHeardFrom(_context.clock().now());
if (!data.getIsExpandedDB())
data.expandDBProfile();
data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
DBHistory hist = data.getDBHistory();
hist.lookupSuccessful();

@@ -183,10 +186,13 @@ public class ProfileManagerImpl implements ProfileManager {
* Note that the peer was unable to reply to a db lookup - either with data or with
* a lookupReply redirecting the user elsewhere
*
* This will force creation of DB stats
*/
public void dbLookupFailed(Hash peer) {
PeerProfile data = getProfile(peer);
if (data == null) return;
if (!data.getIsExpandedDB())
data.expandDBProfile();
DBHistory hist = data.getDBHistory();
hist.lookupFailed();
}
@@ -203,6 +209,8 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastHeardFrom(_context.clock().now());
if (!data.getIsExpandedDB())
return;
data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
data.getDbIntroduction().addData(newPeers, responseTimeMs);
DBHistory hist = data.getDBHistory();

@@ -217,6 +225,8 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastHeardFrom(_context.clock().now());
if (!data.getIsExpandedDB())
return;
DBHistory hist = data.getDBHistory();
hist.lookupReceived();
}

@@ -229,6 +239,8 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastHeardFrom(_context.clock().now());
if (!data.getIsExpandedDB())
return;
DBHistory hist = data.getDBHistory();
hist.unpromptedStoreReceived(wasNewKey);
}

@@ -242,8 +254,10 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
long now = _context.clock().now();
data.setLastSendSuccessful(now);
data.setLastHeardFrom(now);
if (!data.getIsExpandedDB())
return;
data.setLastSendSuccessful(now);
// we could do things like update some sort of "how many successful stores we've sent them"...
// naah.. dont really care now
}

@@ -279,7 +293,7 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastHeardFrom(_context.clock().now());
data.getReceiveSize().addData(bytesRead, msToReceive);
//data.getReceiveSize().addData(bytesRead, msToReceive);
}

private PeerProfile getProfile(Hash peer) {
@@ -241,7 +241,7 @@ public class ProfileOrganizer {
*/
public boolean peerSendsBadReplies(Hash peer) {
PeerProfile profile = getProfile(peer);
if (profile != null) {
if (profile != null && profile.getIsExpandedDB()) {
RateStat invalidReplyRateStat = profile.getDBHistory().getInvalidReplyRate();
Rate invalidReplyRate = invalidReplyRateStat.getRate(30*60*1000l);
if ( (invalidReplyRate.getCurrentTotalValue() > MAX_BAD_REPLIES_PER_HOUR) ||
@@ -128,18 +128,20 @@ class ProfilePersistenceHelper {

out.write(buf.toString().getBytes());

profile.getTunnelHistory().store(out);
profile.getDBHistory().store(out);

if (profile.getIsExpanded()) {
// only write out expanded data if, uh, we've got it
profile.getDbIntroduction().store(out, "dbIntroduction");
profile.getDbResponseTime().store(out, "dbResponseTime");
profile.getReceiveSize().store(out, "receiveSize");
profile.getSendSuccessSize().store(out, "sendSuccessSize");
profile.getTunnelHistory().store(out);
//profile.getReceiveSize().store(out, "receiveSize");
//profile.getSendSuccessSize().store(out, "sendSuccessSize");
profile.getTunnelCreateResponseTime().store(out, "tunnelCreateResponseTime");
profile.getTunnelTestResponseTime().store(out, "tunnelTestResponseTime");
}

if (profile.getIsExpandedDB()) {
profile.getDBHistory().store(out);
profile.getDbIntroduction().store(out, "dbIntroduction");
profile.getDbResponseTime().store(out, "dbResponseTime");
}
}

public Set readProfiles() {
@@ -211,12 +213,22 @@ class ProfilePersistenceHelper {
profile.setPeakTunnel1mThroughputKBps(getDouble(props, "tunnelPeakTunnel1mThroughput"));

profile.getTunnelHistory().load(props);
profile.getDBHistory().load(props);

profile.getDbIntroduction().load(props, "dbIntroduction", true);
profile.getDbResponseTime().load(props, "dbResponseTime", true);
profile.getReceiveSize().load(props, "receiveSize", true);
profile.getSendSuccessSize().load(props, "sendSuccessSize", true);

// In the interest of keeping the in-memory profiles small,
// don't load the DB info at all unless there is something interesting there
// (i.e. floodfills)
// It seems like we do one or two lookups as a part of handshaking?
// Not sure, to be researched.
if (getLong(props, "dbHistory.successfulLookups") > 1 ||
getLong(props, "dbHistory.failedlLokups") > 1) {
profile.expandDBProfile();
profile.getDBHistory().load(props);
profile.getDbIntroduction().load(props, "dbIntroduction", true);
profile.getDbResponseTime().load(props, "dbResponseTime", true);
}

//profile.getReceiveSize().load(props, "receiveSize", true);
//profile.getSendSuccessSize().load(props, "sendSuccessSize", true);
profile.getTunnelCreateResponseTime().load(props, "tunnelCreateResponseTime", true);
profile.getTunnelTestResponseTime().load(props, "tunnelTestResponseTime", true);