* Profiles:

  - Remove the almost-unused send and receive size RateStats;
    this also reduces the effective time for isActive()
  - Only store DB history on-demand to save space;
    fix up resulting NPEs
commit 9976bea03f
parent 7997aeaca5
Author: zzz
Date: 2009-11-02 16:50:28 +00:00
8 changed files with 144 additions and 72 deletions
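
In short, the pattern the diffs below implement (a sketch only, not code from this commit; PeerProfile, expandDBProfile(), getIsExpandedDB(), getDbResponseTime() and getDBHistory() are the real methods touched below, while the two wrapper methods here are illustrative): DB-related stats are now created lazily, so write paths force expansion first and read paths bail out when a profile has no DB data, because the DB getters may now return null.

    // Illustrative callers, not part of the diff below.
    void recordDbLookup(PeerProfile data, long responseTimeMs, boolean success) {
        if (!data.getIsExpandedDB())
            data.expandDBProfile();   // lazily allocates the DB RateStats and DBHistory
        data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
        if (success)
            data.getDBHistory().lookupSuccessful();
        else
            data.getDBHistory().lookupFailed();
    }

    boolean hasDbStats(PeerProfile profile) {
        // read-only callers must guard: getDBHistory()/getDbResponseTime() may be null now
        return profile != null && profile.getIsExpandedDB();
    }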

ProfileOrganizerRenderer.java

@@ -105,6 +105,9 @@ class ProfileOrganizerRenderer {
             buf.append("<tr><td align=\"center\" nowrap>");
             buf.append(_context.commSystem().renderPeerHTML(peer));
+            // debug
+            //if(prof.getIsExpandedDB())
+            // buf.append(" ** ");
             buf.append("</td><td align=\"center\">");
             switch (tier) {
@@ -156,7 +159,8 @@ class ProfileOrganizerRenderer {
                 buf.append(' ').append(fails).append('/').append(total).append(" ").append(_("Test Fails"));
             }
             buf.append("&nbsp;</td>");
-            buf.append("<td nowrap align=\"center\"><a target=\"_blank\" href=\"dumpprofile.jsp?peer=").append(peer.toBase64().substring(0,6)).append("\">profile</a>");
+            buf.append("<td nowrap align=\"center\"><a target=\"_blank\" href=\"dumpprofile.jsp?peer=")
+               .append(peer.toBase64().substring(0,6)).append("\">").append(_("profile")).append("</a>");
             buf.append("&nbsp;<a href=\"configpeer.jsp?peer=").append(peer.toBase64()).append("\">+-</a></td>\n");
             buf.append("</tr>");
             // let's not build the whole page in memory (~500 bytes per peer)
@@ -223,6 +227,8 @@ class ProfileOrganizerRenderer {
                 buf.append("<td align=\"right\">").append(dbh.getUnpromptedDbStoreOld()).append("</td>");
                 buf.append("<td align=\"right\">").append(davg(dbh, 60*60*1000l)).append("</td>");
                 buf.append("<td align=\"right\">").append(davg(dbh, 24*60*60*1000l)).append("</td>");
+            } else {
+                buf.append("<td align=\"right\">n/a<td align=\"right\">n/a<td align=\"right\">n/a<td align=\"right\">n/a<td align=\"right\">n/a<td align=\"right\">n/a");
             }
         }
         buf.append("</table>");

KademliaNetworkDatabaseFacade.java

@@ -891,12 +891,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     public int getPeerTimeout(Hash peer) {
         PeerProfile prof = _context.profileOrganizer().getProfile(peer);
         double responseTime = MAX_PER_PEER_TIMEOUT;
-        if (prof != null)
+        if (prof != null && prof.getIsExpandedDB()) {
             responseTime = prof.getDbResponseTime().getLifetimeAverageValue();
             if (responseTime < MIN_PER_PEER_TIMEOUT)
                 responseTime = MIN_PER_PEER_TIMEOUT;
             else if (responseTime > MAX_PER_PEER_TIMEOUT)
                 responseTime = MAX_PER_PEER_TIMEOUT;
+        }
         return 4 * (int)responseTime; // give it up to 4x the average response time
     }

StoreJob.java

@@ -174,11 +174,13 @@ class StoreJob extends JobImpl {
                 _state.addSkipped(peer);
             } else {
                 int peerTimeout = _facade.getPeerTimeout(peer);
-                PeerProfile prof = getContext().profileOrganizer().getProfile(peer);
-                if (prof != null) {
-                    RateStat failing = prof.getDBHistory().getFailedLookupRate();
-                    Rate failed = failing.getRate(60*60*1000);
-                }
+                //PeerProfile prof = getContext().profileOrganizer().getProfile(peer);
+                //if (prof != null && prof.getIsExpandedDB()) {
+                // RateStat failing = prof.getDBHistory().getFailedLookupRate();
+                // Rate failed = failing.getRate(60*60*1000);
+                //}
                 //long failedCount = failed.getCurrentEventCount()+failed.getLastEventCount();
                 //if (failedCount > 10) {
                 // _state.addSkipped(peer);

IntegrationCalculator.java

@@ -19,12 +19,15 @@ public class IntegrationCalculator extends Calculator {
     @Override
     public double calc(PeerProfile profile) {
-        // give more weight to recent counts
-        long val = profile.getDbIntroduction().getRate(24*60*60*1000l).getCurrentEventCount();
-        val += 2 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getLastEventCount();
-        val += 3 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getCurrentEventCount();
-        val += 4 * 24 * profile.getDbIntroduction().getRate(60*60*1000l).getCurrentEventCount();
-        val /= 10;
+        long val = 0;
+        if (profile.getIsExpandedDB()) {
+            // give more weight to recent counts
+            val = profile.getDbIntroduction().getRate(24*60*60*1000l).getCurrentEventCount();
+            val += 2 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getLastEventCount();
+            val += 3 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getCurrentEventCount();
+            val += 4 * 24 * profile.getDbIntroduction().getRate(60*60*1000l).getCurrentEventCount();
+            val /= 10;
+        }
         val += profile.getIntegrationBonus();
         return val;
     }

PeerProfile.java

@@ -36,8 +36,8 @@ public class PeerProfile {
     private long _lastHeardFrom;
     private double _tunnelTestResponseTimeAvg;
     // periodic rates
-    private RateStat _sendSuccessSize = null;
-    private RateStat _receiveSize = null;
+    //private RateStat _sendSuccessSize = null;
+    //private RateStat _receiveSize = null;
     private RateStat _dbResponseTime = null;
     private RateStat _tunnelCreateResponseTime = null;
     private RateStat _tunnelTestResponseTime = null;
@@ -56,6 +56,7 @@ public class PeerProfile {
     private DBHistory _dbHistory;
     // does this peer profile contain expanded data, or just the basics?
     private boolean _expanded;
+    private boolean _expandedDB;
     private int _consecutiveShitlists;

     public PeerProfile(RouterContext context, Hash peer) {
@@ -72,6 +73,8 @@ public class PeerProfile {
         _consecutiveShitlists = 0;
         _tunnelTestResponseTimeAvg = 0.0d;
         _peer = peer;
+        // this is always true, and there are several places in the router that will NPE
+        // if it is false, so all need to be fixed before we can have non-expanded profiles
         if (expand)
             expandProfile();
     }
@@ -87,6 +90,7 @@ public class PeerProfile {
      *
      */
     public boolean getIsExpanded() { return _expanded; }
+    public boolean getIsExpandedDB() { return _expandedDB; }

     public int incrementShitlists() { return _consecutiveShitlists++; }
     public void unshitlist() { _consecutiveShitlists = 0; }
@@ -107,18 +111,25 @@ public class PeerProfile {
      *
      * Note: this appears to be the only use for these two RateStats.
      *
+     * Update: Rewritten so we can get rid of the two RateStats.
+     * This also helps by not having it depend on coalesce boundaries.
+     *
      * @param period must be one of the periods in the RateStat constructors below
      *        (5*60*1000 or 60*60*1000)
      */
     public boolean getIsActive(long period) {
-        if ( (getSendSuccessSize().getRate(period).getCurrentEventCount() > 0) ||
-             (getSendSuccessSize().getRate(period).getLastEventCount() > 0) ||
-             (getReceiveSize().getRate(period).getCurrentEventCount() > 0) ||
-             (getReceiveSize().getRate(period).getLastEventCount() > 0) ||
-             _context.commSystem().isEstablished(_peer) )
-            return true;
-        else
-            return false;
+        //if ( (getSendSuccessSize().getRate(period).getCurrentEventCount() > 0) ||
+        // (getSendSuccessSize().getRate(period).getLastEventCount() > 0) ||
+        // (getReceiveSize().getRate(period).getCurrentEventCount() > 0) ||
+        // (getReceiveSize().getRate(period).getLastEventCount() > 0) ||
+        // _context.commSystem().isEstablished(_peer) )
+        // return true;
+        //else
+        // return false;
+        long before = _context.clock().now() - period;
+        return getLastHeardFrom() < before ||
+               getLastSendSuccessful() < before ||
+               _context.commSystem().isEstablished(_peer);
     }
@@ -142,25 +153,31 @@ public class PeerProfile {
     public long getLastHeardFrom() { return _lastHeardFrom; }
     public void setLastHeardFrom(long when) { _lastHeardFrom = when; }

-    /** history of tunnel activity with the peer */
+    /** history of tunnel activity with the peer
+        Warning - may return null if !getIsExpanded() */
     public TunnelHistory getTunnelHistory() { return _tunnelHistory; }
     public void setTunnelHistory(TunnelHistory history) { _tunnelHistory = history; }

-    /** history of db activity with the peer */
+    /** history of db activity with the peer
+        Warning - may return null if !getIsExpandedDB() */
     public DBHistory getDBHistory() { return _dbHistory; }
     public void setDBHistory(DBHistory hist) { _dbHistory = hist; }

     /** how large successfully sent messages are, calculated over a 1 minute, 1 hour, and 1 day period */
-    public RateStat getSendSuccessSize() { return _sendSuccessSize; }
+    //public RateStat getSendSuccessSize() { return _sendSuccessSize; }
     /** how large received messages are, calculated over a 1 minute, 1 hour, and 1 day period */
-    public RateStat getReceiveSize() { return _receiveSize; }
-    /** how long it takes to get a db response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period */
+    //public RateStat getReceiveSize() { return _receiveSize; }
+    /** how long it takes to get a db response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period
+        Warning - may return null if !getIsExpandedDB() */
     public RateStat getDbResponseTime() { return _dbResponseTime; }
-    /** how long it takes to get a tunnel create response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period */
+    /** how long it takes to get a tunnel create response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period
+        Warning - may return null if !getIsExpanded() */
     public RateStat getTunnelCreateResponseTime() { return _tunnelCreateResponseTime; }
-    /** how long it takes to successfully test a tunnel this peer participates in (in milliseconds), calculated over a 10 minute, 1 hour, and 1 day period */
+    /** how long it takes to successfully test a tunnel this peer participates in (in milliseconds), calculated over a 10 minute, 1 hour, and 1 day period
+        Warning - may return null if !getIsExpanded() */
     public RateStat getTunnelTestResponseTime() { return _tunnelTestResponseTime; }
-    /** how many new peers we get from dbSearchReplyMessages or dbStore messages, calculated over a 1 hour, 1 day, and 1 week period */
+    /** how many new peers we get from dbSearchReplyMessages or dbStore messages, calculated over a 1 hour, 1 day, and 1 week period
+        Warning - may return null if !getIsExpandedDB() */
     public RateStat getDbIntroduction() { return _dbIntroduction; }

     /**
@@ -327,10 +344,12 @@ public class PeerProfile {
      * extensive stats on them, call this to discard excess data points. Specifically,
      * this drops the rates, the tunnelHistory, and the dbHistory.
      *
+     * UNUSED for now, will cause NPEs elsewhere
      */
+    /*****
     public void shrinkProfile() {
-        _sendSuccessSize = null;
-        _receiveSize = null;
+        //_sendSuccessSize = null;
+        //_receiveSize = null;
         _dbResponseTime = null;
         _tunnelCreateResponseTime = null;
         _tunnelTestResponseTime = null;
@@ -339,7 +358,9 @@ public class PeerProfile {
         _dbHistory = null;

         _expanded = false;
+        _expandedDB = false;
     }
+    ******/

     /**
      * When the given peer is performing well enough that we want to keep detailed
@@ -350,32 +371,43 @@ public class PeerProfile {
      */
     public void expandProfile() {
         String group = (null == _peer ? "profileUnknown" : _peer.toBase64().substring(0,6));
-        if (_sendSuccessSize == null)
-            _sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 5*60*1000l, 60*60*1000l });
-        if (_receiveSize == null)
-            _receiveSize = new RateStat("receiveSize", "How large received messages are", group, new long[] { 5*60*1000l, 60*60*1000l } );
-        if (_dbResponseTime == null)
-            _dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+        //if (_sendSuccessSize == null)
+        // _sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 5*60*1000l, 60*60*1000l });
+        //if (_receiveSize == null)
+        // _receiveSize = new RateStat("receiveSize", "How large received messages are", group, new long[] { 5*60*1000l, 60*60*1000l } );
         if (_tunnelCreateResponseTime == null)
             _tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000 } );
         if (_tunnelTestResponseTime == null)
             _tunnelTestResponseTime = new RateStat("tunnelTestResponseTime", "how long it takes to successfully test a tunnel this peer participates in (in milliseconds)", group, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 } );
-        if (_dbIntroduction == null)
-            _dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 6*60*60*1000l, 24*60*60*1000l });
         if (_tunnelHistory == null)
             _tunnelHistory = new TunnelHistory(_context, group);
+        //_sendSuccessSize.setStatLog(_context.statManager().getStatLog());
+        //_receiveSize.setStatLog(_context.statManager().getStatLog());
+        _tunnelCreateResponseTime.setStatLog(_context.statManager().getStatLog());
+        _tunnelTestResponseTime.setStatLog(_context.statManager().getStatLog());
+        _expanded = true;
+    }
+
+    /**
+     * For floodfills
+     */
+    public synchronized void expandDBProfile() {
+        String group = (null == _peer ? "profileUnknown" : _peer.toBase64().substring(0,6));
+        if (_dbResponseTime == null)
+            _dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
+        if (_dbIntroduction == null)
+            _dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 6*60*60*1000l, 24*60*60*1000l });
         if (_dbHistory == null)
             _dbHistory = new DBHistory(_context, group);
-        _sendSuccessSize.setStatLog(_context.statManager().getStatLog());
-        _receiveSize.setStatLog(_context.statManager().getStatLog());
         _dbResponseTime.setStatLog(_context.statManager().getStatLog());
-        _tunnelCreateResponseTime.setStatLog(_context.statManager().getStatLog());
-        _tunnelTestResponseTime.setStatLog(_context.statManager().getStatLog());
         _dbIntroduction.setStatLog(_context.statManager().getStatLog());
-        _expanded = true;
+        _expandedDB = true;
     }

     /** once a day, on average, cut the measured throughtput values in half */
     /** let's try once an hour times 3/4 */
     private static final int DROP_PERIOD_MINUTES = 60;
@@ -419,14 +451,16 @@ public class PeerProfile {
     /** update the stats and rates (this should be called once a minute) */
     public void coalesceStats() {
         if (!_expanded) return;
-        _dbIntroduction.coalesceStats();
-        _dbResponseTime.coalesceStats();
-        _receiveSize.coalesceStats();
-        _sendSuccessSize.coalesceStats();
+        //_receiveSize.coalesceStats();
+        //_sendSuccessSize.coalesceStats();
         _tunnelCreateResponseTime.coalesceStats();
         _tunnelTestResponseTime.coalesceStats();
-        _dbHistory.coalesceStats();
         _tunnelHistory.coalesceStats();
+        if (_expandedDB) {
+            _dbIntroduction.coalesceStats();
+            _dbResponseTime.coalesceStats();
+            _dbHistory.coalesceStats();
+        }

         coalesceThroughput();

ProfileManagerImpl.java

@@ -39,7 +39,7 @@ public class ProfileManagerImpl implements ProfileManager {
         PeerProfile data = getProfile(peer);
         if (data == null) return;
         data.setLastSendSuccessful(_context.clock().now());
-        data.getSendSuccessSize().addData(bytesSent, msToSend);
+        //data.getSendSuccessSize().addData(bytesSent, msToSend);
     }

     /**
@@ -169,11 +169,14 @@ public class ProfileManagerImpl implements ProfileManager {
     /**
      * Note that the peer was able to return the valid data for a db lookup
      *
+     * This will force creation of DB stats
      */
     public void dbLookupSuccessful(Hash peer, long responseTimeMs) {
         PeerProfile data = getProfile(peer);
         if (data == null) return;
         data.setLastHeardFrom(_context.clock().now());
+        if (!data.getIsExpandedDB())
+            data.expandDBProfile();
         data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
         DBHistory hist = data.getDBHistory();
         hist.lookupSuccessful();
@@ -183,10 +186,13 @@ public class ProfileManagerImpl implements ProfileManager {
      * Note that the peer was unable to reply to a db lookup - either with data or with
      * a lookupReply redirecting the user elsewhere
      *
+     * This will force creation of DB stats
      */
     public void dbLookupFailed(Hash peer) {
         PeerProfile data = getProfile(peer);
         if (data == null) return;
+        if (!data.getIsExpandedDB())
+            data.expandDBProfile();
         DBHistory hist = data.getDBHistory();
         hist.lookupFailed();
     }
@@ -203,6 +209,8 @@ public class ProfileManagerImpl implements ProfileManager {
         PeerProfile data = getProfile(peer);
         if (data == null) return;
         data.setLastHeardFrom(_context.clock().now());
+        if (!data.getIsExpandedDB())
+            return;
         data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
         data.getDbIntroduction().addData(newPeers, responseTimeMs);
         DBHistory hist = data.getDBHistory();
@@ -217,6 +225,8 @@ public class ProfileManagerImpl implements ProfileManager {
         PeerProfile data = getProfile(peer);
         if (data == null) return;
         data.setLastHeardFrom(_context.clock().now());
+        if (!data.getIsExpandedDB())
+            return;
         DBHistory hist = data.getDBHistory();
         hist.lookupReceived();
     }
@@ -229,6 +239,8 @@ public class ProfileManagerImpl implements ProfileManager {
         PeerProfile data = getProfile(peer);
         if (data == null) return;
         data.setLastHeardFrom(_context.clock().now());
+        if (!data.getIsExpandedDB())
+            return;
         DBHistory hist = data.getDBHistory();
         hist.unpromptedStoreReceived(wasNewKey);
     }
@@ -242,8 +254,10 @@ public class ProfileManagerImpl implements ProfileManager {
         PeerProfile data = getProfile(peer);
         if (data == null) return;
         long now = _context.clock().now();
-        data.setLastSendSuccessful(now);
         data.setLastHeardFrom(now);
+        if (!data.getIsExpandedDB())
+            return;
+        data.setLastSendSuccessful(now);
         // we could do things like update some sort of "how many successful stores we've sent them"...
         // naah.. dont really care now
     }
@@ -279,7 +293,7 @@ public class ProfileManagerImpl implements ProfileManager {
         PeerProfile data = getProfile(peer);
         if (data == null) return;
         data.setLastHeardFrom(_context.clock().now());
-        data.getReceiveSize().addData(bytesRead, msToReceive);
+        //data.getReceiveSize().addData(bytesRead, msToReceive);
     }

     private PeerProfile getProfile(Hash peer) {

ProfileOrganizer.java

@@ -241,7 +241,7 @@ public class ProfileOrganizer {
      */
     public boolean peerSendsBadReplies(Hash peer) {
         PeerProfile profile = getProfile(peer);
-        if (profile != null) {
+        if (profile != null && profile.getIsExpandedDB()) {
             RateStat invalidReplyRateStat = profile.getDBHistory().getInvalidReplyRate();
             Rate invalidReplyRate = invalidReplyRateStat.getRate(30*60*1000l);
             if ( (invalidReplyRate.getCurrentTotalValue() > MAX_BAD_REPLIES_PER_HOUR) ||

ProfilePersistenceHelper.java

@@ -128,18 +128,20 @@ class ProfilePersistenceHelper {
         out.write(buf.toString().getBytes());

-        profile.getTunnelHistory().store(out);
-        profile.getDBHistory().store(out);
         if (profile.getIsExpanded()) {
             // only write out expanded data if, uh, we've got it
-            profile.getDbIntroduction().store(out, "dbIntroduction");
-            profile.getDbResponseTime().store(out, "dbResponseTime");
-            profile.getReceiveSize().store(out, "receiveSize");
-            profile.getSendSuccessSize().store(out, "sendSuccessSize");
+            profile.getTunnelHistory().store(out);
+            //profile.getReceiveSize().store(out, "receiveSize");
+            //profile.getSendSuccessSize().store(out, "sendSuccessSize");
             profile.getTunnelCreateResponseTime().store(out, "tunnelCreateResponseTime");
             profile.getTunnelTestResponseTime().store(out, "tunnelTestResponseTime");
         }
+        if (profile.getIsExpandedDB()) {
+            profile.getDBHistory().store(out);
+            profile.getDbIntroduction().store(out, "dbIntroduction");
+            profile.getDbResponseTime().store(out, "dbResponseTime");
+        }
     }

     public Set readProfiles() {
@@ -211,12 +213,22 @@ class ProfilePersistenceHelper {
         profile.setPeakTunnel1mThroughputKBps(getDouble(props, "tunnelPeakTunnel1mThroughput"));
         profile.getTunnelHistory().load(props);
-        profile.getDBHistory().load(props);
-        profile.getDbIntroduction().load(props, "dbIntroduction", true);
-        profile.getDbResponseTime().load(props, "dbResponseTime", true);
-        profile.getReceiveSize().load(props, "receiveSize", true);
-        profile.getSendSuccessSize().load(props, "sendSuccessSize", true);
+
+        // In the interest of keeping the in-memory profiles small,
+        // don't load the DB info at all unless there is something interesting there
+        // (i.e. floodfills)
+        // It seems like we do one or two lookups as a part of handshaking?
+        // Not sure, to be researched.
+        if (getLong(props, "dbHistory.successfulLookups") > 1 ||
+            getLong(props, "dbHistory.failedlLokups") > 1) {
+            profile.expandDBProfile();
+            profile.getDBHistory().load(props);
+            profile.getDbIntroduction().load(props, "dbIntroduction", true);
+            profile.getDbResponseTime().load(props, "dbResponseTime", true);
+        }
+        //profile.getReceiveSize().load(props, "receiveSize", true);
+        //profile.getSendSuccessSize().load(props, "sendSuccessSize", true);
         profile.getTunnelCreateResponseTime().load(props, "tunnelCreateResponseTime", true);
         profile.getTunnelTestResponseTime().load(props, "tunnelTestResponseTime", true);