Console: Fix NPE on /profiles

Profiles:
  - Fix first heard about to be earliest, undeprecate
  - Fixup first heard about at profile readin
  - Persist good/bad lookup/store DBHistory stats added in 0.7.8
  - Remove unused DBHistory methods and fields to save memory
  - Change bonus longs to ints to save memory
  - Extend profile expiration time from 3 days to 15
  - Consolidate getLong()
  - Synch fixes
Sybil tool: Tweaks and enhancements
This commit is contained in:
zzz
2015-12-04 20:35:38 +00:00
parent cab69f6583
commit 68c312139e
10 changed files with 290 additions and 147 deletions

View File

@ -42,12 +42,12 @@ public class ConfigPeerHandler extends FormHandler {
PeerProfile prof = _context.profileOrganizer().getProfile(h);
if (prof != null) {
try {
prof.setSpeedBonus(Long.parseLong(_speed));
prof.setSpeedBonus(Integer.parseInt(_speed));
} catch (NumberFormatException nfe) {
addFormError(_t("Bad speed value"));
}
try {
prof.setCapacityBonus(Long.parseLong(_capacity));
prof.setCapacityBonus(Integer.parseInt(_capacity));
} catch (NumberFormatException nfe) {
addFormError(_t("Bad capacity value"));
}

View File

@ -48,6 +48,8 @@ class ProfileOrganizerRenderer {
for (Hash peer : peers) {
if (_organizer.getUs().equals(peer)) continue;
PeerProfile prof = _organizer.getProfile(peer);
if (prof == null)
continue;
if (mode == 2) {
RouterInfo info = _context.netDb().lookupRouterInfoLocally(peer);
if (info != null && info.getCapabilities().indexOf("f") >= 0)

View File

@ -45,10 +45,12 @@ class SybilRenderer {
private static final int PAIRMAX = 10;
private static final int MAX = 10;
// multiplied by size - 1, will also get POINTS24 added
private static final double POINTS32 = 10.0;
// multiplied by size - 1, will also get POINTS16 added
private static final double POINTS24 = 10.0;
// multiplied by size - 1
private static final double POINTS32 = 15.0;
// multiplied by size - 1
private static final double POINTS24 = 2.0;
private static final double POINTS16 = 0.25;
private static final double MIN_CLOSE = 242.0;
private static final double MIN_DISPLAY_POINTS = 3.0;
@ -75,6 +77,9 @@ class SybilRenderer {
}
}
/**
* A total score and a List of reason Strings
*/
private static class Points implements Comparable<Points> {
private double points;
private final List<String> reasons;
@ -106,12 +111,13 @@ class SybilRenderer {
}
private static void addPoints(Map<Hash, Points> points, Hash h, double d, String reason) {
DecimalFormat fmt = new DecimalFormat("#0.00");
Points dd = points.get(h);
if (dd != null) {
dd.points += d;
dd.reasons.add(reason);
dd.reasons.add("<b>" + fmt.format(d) + ":</b> " + reason);
} else {
points.put(h, new Points(d, reason));
points.put(h, new Points(d, "<b>" + fmt.format(d) + ":</b> " + reason));
}
}
@ -163,18 +169,23 @@ class SybilRenderer {
buf.append("</p>");
Map<Hash, Points> points = new HashMap<Hash, Points>(64);
// IP analysis
renderIPGroups32(out, buf, ris, points);
renderIPGroups24(out, buf, ris, points);
renderIPGroups16(out, buf, ris);
renderIPGroups16(out, buf, ris, points);
// Pairwise distance analysis
renderPairDistance(out, buf, ris, points);
// Distance to our router analysis
buf.append("<h3>Closest Floodfills to Our Routing Key (Where we Store our RI)</h3>");
renderRouterInfoHTML(out, buf, ourRKey, avgMinDist, ris, points);
buf.append("<h3>Closest Floodfills to Our Router Hash (DHT Neighbors if we are Floodfill)</h3>");
renderRouterInfoHTML(out, buf, us, avgMinDist, ris, points);
// Distance to our published destinations analysis
Map<Hash, TunnelPool> clientInboundPools = _context.tunnelManager().getInboundClientPools();
List<Hash> destinations = new ArrayList<Hash>(clientInboundPools.keySet());
boolean debug = _context.getBooleanProperty(HelperBase.PROP_ADVANCED);
@ -194,6 +205,8 @@ class SybilRenderer {
renderRouterInfoHTML(out, buf, rkey, avgMinDist, ris, points);
}
// TODO Profile analysis
if (!points.isEmpty()) {
List<Hash> warns = new ArrayList<Hash>(points.keySet());
Collections.sort(warns, new PointsComparator(points));
@ -260,8 +273,10 @@ class SybilRenderer {
renderRouterInfo(buf, p.r2, null, false, false);
double point = MIN_CLOSE - distance;
if (point > 0) {
addPoints(points, p.r1.getHash(), point, fmt.format(point) + ": Too close to other floodfill " + p.r2.getHash().toBase64());
addPoints(points, p.r2.getHash(), point, fmt.format(point) + ": Too close to other floodfill " + p.r1.getHash().toBase64());
addPoints(points, p.r1.getHash(), point, "Very close (" + fmt.format(distance) +
") to other floodfill " + p.r2.getHash().toBase64());
addPoints(points, p.r2.getHash(), point, "Very close (" + fmt.format(distance) +
") to other floodfill " + p.r1.getHash().toBase64());
}
}
out.write(buf.toString());
@ -347,7 +362,7 @@ class SybilRenderer {
found = true;
renderRouterInfo(buf, info, null, false, false);
double point = POINTS32 * (count - 1);
addPoints(points, info.getHash(), point, fmt.format(point) + ": Same IP with " + (count - 1) + " other");
addPoints(points, info.getHash(), point, "Same IP with " + (count - 1) + " other");
}
}
if (!found)
@ -396,7 +411,7 @@ class SybilRenderer {
continue;
renderRouterInfo(buf, info, null, false, false);
double point = POINTS24 * (count - 1);
addPoints(points, info.getHash(), point, fmt.format(point) + ": Same /24 IP with " + (count - 1) + " other");
addPoints(points, info.getHash(), point, "Same /24 IP with " + (count - 1) + " other");
}
}
out.write(buf.toString());
@ -404,8 +419,7 @@ class SybilRenderer {
buf.setLength(0);
}
/** no points */
private void renderIPGroups16(Writer out, StringBuilder buf, List<RouterInfo> ris) throws IOException {
private void renderIPGroups16(Writer out, StringBuilder buf, List<RouterInfo> ris, Map<Hash, Points> points) throws IOException {
buf.append("<h3>Floodfills in the Same /16 (4 minimum)</h3>");
int sz = ris.size();
ObjectCounter<Integer> oc = new ObjectCounter<Integer>();
@ -423,6 +437,7 @@ class SybilRenderer {
foo.add(ii);
}
Collections.sort(foo, new FooComparator(oc));
DecimalFormat fmt = new DecimalFormat("#0.00");
for (Integer ii : foo) {
int count = oc.count(ii);
int i = ii.intValue();
@ -439,6 +454,8 @@ class SybilRenderer {
if ((ip[1] & 0xff) != i1)
continue;
renderRouterInfo(buf, info, null, false, false);
double point = POINTS16 * (count - 1);
addPoints(points, info.getHash(), point, "Same /16 IP with " + (count - 1) + " other");
}
}
out.write(buf.toString());
@ -484,7 +501,7 @@ class SybilRenderer {
double point = MIN_CLOSE - dist;
if (point > 0) {
point *= 2.0;
addPoints(points, ri.getHash(), point, fmt.format(point) + ": Too close to our key " + us.toBase64());
addPoints(points, ri.getHash(), point, "Very close (" + fmt.format(dist) + ") to our key " + us.toBase64());
}
if (i >= MAX - 1)
break;
@ -530,7 +547,7 @@ class SybilRenderer {
if (isUs) {
buf.append("<a name=\"our-info\" ></a><b>" + _t("Our info") + ": ").append(hash).append("</b></th></tr><tr><td>\n");
} else {
buf.append("<b>" + _t("Peer info for") + ":</b> ").append(hash).append("\n");
buf.append("<b>" + _t("Router") + ":</b> ").append(hash).append("\n");
if (!full) {
buf.append("[<a href=\"netdb?r=").append(hash.substring(0, 6)).append("\" >").append(_t("Full entry")).append("</a>]");
}
@ -558,14 +575,21 @@ class SybilRenderer {
if (prof != null) {
long heard = prof.getFirstHeardAbout();
if (heard > 0) {
long age = now - heard;
if (age > 0) {
buf.append("<b>First heard about:</b> ")
.append(_t("{0} ago", DataHelper.formatDuration2(age))).append("<br>\n");
} else {
// shouldnt happen
buf.append("<b>First heard about:</b> in ").append(DataHelper.formatDuration2(0-age)).append("???<br>\n");
}
long age = Math.max(now - heard, 1);
buf.append("<b>First heard about:</b> ")
.append(_t("{0} ago", DataHelper.formatDuration2(age))).append("<br>\n");
}
heard = prof.getLastHeardAbout();
if (heard > 0) {
long age = Math.max(now - heard, 1);
buf.append("<b>Last heard about:</b> ")
.append(_t("{0} ago", DataHelper.formatDuration2(age))).append("<br>\n");
}
heard = prof.getLastHeardFrom();
if (heard > 0) {
long age = Math.max(now - heard, 1);
buf.append("<b>Last heard from:</b> ")
.append(_t("{0} ago", DataHelper.formatDuration2(age))).append("<br>\n");
}
// any other profile stuff?
}
@ -596,7 +620,8 @@ class SybilRenderer {
Map<Object, Object> p = addr.getOptionsMap();
for (Map.Entry<Object, Object> e : p.entrySet()) {
String name = (String) e.getKey();
if (name.equals("key") || name.startsWith("ikey") || name.startsWith("itag") || name.startsWith("iport"))
if (name.equals("key") || name.startsWith("ikey") || name.startsWith("itag") ||
name.startsWith("iport") || name.equals("mtu"))
continue;
String val = (String) e.getValue();
buf.append('[').append(_t(DataHelper.stripHTML(name))).append('=');

View File

@ -1,3 +1,16 @@
2015-12-04 zzz
* Console: Fix NPE on /profiles
* Profiles:
- Fix first heard about to be earliest, undeprecate
- Fixup first heard about at profile readin
- Persist good/bad lookup/store DBHistory stats added in 0.7.8
- Remove unused DBHistory methods and fields to save memory
- Change bonus longs to ints to save memory
- Extend profile expiration time from 3 days to 15
- Consolidate getLong()
- Synch fixes
* Sybil tool: Tweaks and enhancements
2015-12-03 zzz
* Console: Add experimental Sybil analysis tool
* NetDb: Fix deadlock (ticket #1722)

View File

@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 8;
public final static long BUILD = 9;
/** for example "-test" */
public final static String EXTRA = "";

View File

@ -16,17 +16,17 @@ import net.i2p.util.Log;
public class DBHistory {
private final Log _log;
private final RouterContext _context;
private long _successfulLookups;
private long _failedLookups;
//private long _successfulLookups;
//private long _failedLookups;
private RateStat _failedLookupRate;
private RateStat _invalidReplyRate;
private long _lookupReplyNew;
private long _lookupReplyOld;
private long _lookupReplyDuplicate;
private long _lookupReplyInvalid;
private long _lookupsReceived;
private long _avgDelayBetweenLookupsReceived;
private long _lastLookupReceived;
//private long _lookupReplyNew;
//private long _lookupReplyOld;
//private long _lookupReplyDuplicate;
//private long _lookupReplyInvalid;
//private long _lookupsReceived;
//private long _avgDelayBetweenLookupsReceived;
//private long _lastLookupReceived;
private long _lastLookupSuccessful;
private long _lastLookupFailed;
private long _lastStoreSuccessful;
@ -39,49 +39,68 @@ public class DBHistory {
_context = context;
_log = context.logManager().getLog(DBHistory.class);
_statGroup = statGroup;
_lastLookupReceived = -1;
//_lastLookupReceived = -1;
createRates(statGroup);
}
/** how many times we have sent them a db lookup and received the value back from them
* @deprecated unused
*/
public long getSuccessfulLookups() { return _successfulLookups; }
//public long getSuccessfulLookups() { return _successfulLookups; }
/** how many times we have sent them a db lookup and not received the value or a lookup reply
* @deprecated unused
*/
public long getFailedLookups() { return _failedLookups; }
//public long getFailedLookups() { return _failedLookups; }
/** how many peers that we have never seen before did lookups provide us with?
* @deprecated unused
*/
public long getLookupReplyNew() { return _lookupReplyNew; }
//public long getLookupReplyNew() { return _lookupReplyNew; }
/** how many peers that we have already seen did lookups provide us with?
* @deprecated unused
*/
public long getLookupReplyOld() { return _lookupReplyOld; }
//public long getLookupReplyOld() { return _lookupReplyOld; }
/** how many peers that we explicitly asked the peer not to send us did they reply with?
* @deprecated unused
*/
public long getLookupReplyDuplicate() { return _lookupReplyDuplicate; }
//public long getLookupReplyDuplicate() { return _lookupReplyDuplicate; }
/** how many peers that were incorrectly formatted / expired / otherwise illegal did lookups provide us with?
* @deprecated unused
*/
public long getLookupReplyInvalid() { return _lookupReplyInvalid; }
//public long getLookupReplyInvalid() { return _lookupReplyInvalid; }
/** how many lookups this peer has sent us?
* @deprecated unused
*/
public long getLookupsReceived() { return _lookupsReceived; }
//public long getLookupsReceived() { return _lookupsReceived; }
/** how frequently do they send us lookup requests?
* @deprecated unused
*/
public long getAvgDelayBetweenLookupsReceived() { return _avgDelayBetweenLookupsReceived; }
//public long getAvgDelayBetweenLookupsReceived() { return _avgDelayBetweenLookupsReceived; }
/** when did they last send us a request?
* @deprecated unused
*/
public long getLastLookupReceived() { return _lastLookupReceived; }
// public long getLastLookupReceived() { return _lastLookupReceived; }
/**
* Not persisted until 0.9.24
* @since 0.7.8
*/
public long getLastLookupSuccessful() { return _lastLookupSuccessful; }
/**
* Not persisted until 0.9.24
* @since 0.7.8
*/
public long getLastLookupFailed() { return _lastLookupFailed; }
/**
* Not persisted until 0.9.24
* @since 0.7.8
*/
public long getLastStoreSuccessful() { return _lastStoreSuccessful; }
/**
* Not persisted until 0.9.24
* @since 0.7.8
*/
public long getLastStoreFailed() { return _lastStoreFailed; }
/** how many times have they sent us data we didn't ask for and that we've never seen? */
@ -103,8 +122,8 @@ public class DBHistory {
*
*/
public void lookupSuccessful() {
_successfulLookups++;
_failedLookupRate.addData(0, 0);
//_successfulLookups++;
_failedLookupRate.addData(0);
_context.statManager().addRateData("peer.failedLookupRate", 0);
_lastLookupSuccessful = _context.clock().now();
}
@ -113,8 +132,8 @@ public class DBHistory {
* Note that the peer failed to respond to the db lookup in any way
*/
public void lookupFailed() {
_failedLookups++;
_failedLookupRate.addData(1, 0);
//_failedLookups++;
_failedLookupRate.addData(1);
_context.statManager().addRateData("peer.failedLookupRate", 1);
_lastLookupFailed = _context.clock().now();
}
@ -123,22 +142,25 @@ public class DBHistory {
* Note that we successfully stored to a floodfill peer and verified the result
* by asking another floodfill peer
*
* @since 0.7.8
*/
public void storeSuccessful() {
// Fixme, redefined this to include both lookup and store fails,
// need to fix the javadocs
_failedLookupRate.addData(0, 0);
_failedLookupRate.addData(0);
_context.statManager().addRateData("peer.failedLookupRate", 0);
_lastStoreSuccessful = _context.clock().now();
}
/**
* Note that floodfill verify failed
*
* @since 0.7.8
*/
public void storeFailed() {
// Fixme, redefined this to include both lookup and store fails,
// need to fix the javadocs
_failedLookupRate.addData(1, 0);
_failedLookupRate.addData(1);
_lastStoreFailed = _context.clock().now();
}
@ -152,19 +174,21 @@ public class DBHistory {
* themselves if they don't know anyone else)
*/
public void lookupReply(int newPeers, int oldPeers, int invalid, int duplicate) {
_lookupReplyNew += newPeers;
_lookupReplyOld += oldPeers;
_lookupReplyInvalid += invalid;
_lookupReplyDuplicate += duplicate;
//_lookupReplyNew += newPeers;
//_lookupReplyOld += oldPeers;
//_lookupReplyInvalid += invalid;
//_lookupReplyDuplicate += duplicate;
if (invalid > 0) {
_invalidReplyRate.addData(invalid, 0);
_invalidReplyRate.addData(invalid);
}
}
/**
* Note that the peer sent us a lookup
*
*/
/****
public void lookupReceived() {
long now = _context.clock().now();
long delay = now - _lastLookupReceived;
@ -179,6 +203,8 @@ public class DBHistory {
_avgDelayBetweenLookupsReceived = _avgDelayBetweenLookupsReceived - (delay / _lookupsReceived);
}
}
****/
/**
* Note that the peer sent us a data point without us asking for it
* @param wasNew whether we already knew about this data point or not
@ -190,15 +216,15 @@ public class DBHistory {
_unpromptedDbStoreOld++;
}
public void setSuccessfulLookups(long num) { _successfulLookups = num; }
public void setFailedLookups(long num) { _failedLookups = num; }
public void setLookupReplyNew(long num) { _lookupReplyNew = num; }
public void setLookupReplyOld(long num) { _lookupReplyOld = num; }
public void setLookupReplyInvalid(long num) { _lookupReplyInvalid = num; }
public void setLookupReplyDuplicate(long num) { _lookupReplyDuplicate = num; }
public void setLookupsReceived(long num) { _lookupsReceived = num; }
public void setAvgDelayBetweenLookupsReceived(long ms) { _avgDelayBetweenLookupsReceived = ms; }
public void setLastLookupReceived(long when) { _lastLookupReceived = when; }
//public void setSuccessfulLookups(long num) { _successfulLookups = num; }
//public void setFailedLookups(long num) { _failedLookups = num; }
//public void setLookupReplyNew(long num) { _lookupReplyNew = num; }
//public void setLookupReplyOld(long num) { _lookupReplyOld = num; }
//public void setLookupReplyInvalid(long num) { _lookupReplyInvalid = num; }
//public void setLookupReplyDuplicate(long num) { _lookupReplyDuplicate = num; }
//public void setLookupsReceived(long num) { _lookupsReceived = num; }
//public void setAvgDelayBetweenLookupsReceived(long ms) { _avgDelayBetweenLookupsReceived = ms; }
//public void setLastLookupReceived(long when) { _lastLookupReceived = when; }
public void setUnpromptedDbStoreNew(long num) { _unpromptedDbStoreNew = num; }
public void setUnpromptedDbStoreOld(long num) { _unpromptedDbStoreOld = num; }
@ -217,17 +243,22 @@ public class DBHistory {
buf.append("#################").append(NL);
buf.append("# DB history").append(NL);
buf.append("###").append(NL);
add(buf, "successfulLookups", _successfulLookups, "How many times have they successfully given us what we wanted when looking for it?");
add(buf, "failedLookups", _failedLookups, "How many times have we sent them a db lookup and they didn't reply?");
add(buf, "lookupsReceived", _lookupsReceived, "How many lookups have they sent us?");
add(buf, "lookupReplyDuplicate", _lookupReplyDuplicate, "How many of their reply values to our lookups were something we asked them not to send us?");
add(buf, "lookupReplyInvalid", _lookupReplyInvalid, "How many of their reply values to our lookups were invalid (expired, forged, corrupted)?");
add(buf, "lookupReplyNew", _lookupReplyNew, "How many of their reply values to our lookups were brand new to us?");
add(buf, "lookupReplyOld", _lookupReplyOld, "How many of their reply values to our lookups were something we had seen before?");
//add(buf, "successfulLookups", _successfulLookups, "How many times have they successfully given us what we wanted when looking for it?");
//add(buf, "failedLookups", _failedLookups, "How many times have we sent them a db lookup and they didn't reply?");
//add(buf, "lookupsReceived", _lookupsReceived, "How many lookups have they sent us?");
//add(buf, "lookupReplyDuplicate", _lookupReplyDuplicate, "How many of their reply values to our lookups were something we asked them not to send us?");
//add(buf, "lookupReplyInvalid", _lookupReplyInvalid, "How many of their reply values to our lookups were invalid (expired, forged, corrupted)?");
//add(buf, "lookupReplyNew", _lookupReplyNew, "How many of their reply values to our lookups were brand new to us?");
//add(buf, "lookupReplyOld", _lookupReplyOld, "How many of their reply values to our lookups were something we had seen before?");
add(buf, "unpromptedDbStoreNew", _unpromptedDbStoreNew, "How times have they sent us something we didn't ask for and hadn't seen before?");
add(buf, "unpromptedDbStoreOld", _unpromptedDbStoreOld, "How times have they sent us something we didn't ask for but have seen before?");
add(buf, "lastLookupReceived", _lastLookupReceived, "When was the last time they send us a lookup? (milliseconds since the epoch)");
add(buf, "avgDelayBetweenLookupsReceived", _avgDelayBetweenLookupsReceived, "How long is it typically between each db lookup they send us? (in milliseconds)");
//add(buf, "lastLookupReceived", _lastLookupReceived, "When was the last time they send us a lookup? (milliseconds since the epoch)");
//add(buf, "avgDelayBetweenLookupsReceived", _avgDelayBetweenLookupsReceived, "How long is it typically between each db lookup they send us? (in milliseconds)");
// following 4 weren't persisted until 0.9.24
add(buf, "lastLookupSuccessful", _lastLookupSuccessful, "When was the last time a lookup from them succeeded? (milliseconds since the epoch)");
add(buf, "lastLookupFailed", _lastLookupFailed, "When was the last time a lookup from them failed? (milliseconds since the epoch)");
add(buf, "lastStoreSuccessful", _lastStoreSuccessful, "When was the last time a store to them succeeded? (milliseconds since the epoch)");
add(buf, "lastStoreFailed", _lastStoreFailed, "When was the last time a store to them failed? (milliseconds since the epoch)");
out.write(buf.toString().getBytes("UTF-8"));
_failedLookupRate.store(out, "dbHistory.failedLookupRate");
_invalidReplyRate.store(out, "dbHistory.invalidReplyRate");
@ -240,17 +271,22 @@ public class DBHistory {
public void load(Properties props) {
_successfulLookups = getLong(props, "dbHistory.successfulLookups");
_failedLookups = getLong(props, "dbHistory.failedLookups");
_lookupsReceived = getLong(props, "dbHistory.lookupsReceived");
_lookupReplyDuplicate = getLong(props, "dbHistory.lookupReplyDuplicate");
_lookupReplyInvalid = getLong(props, "dbHistory.lookupReplyInvalid");
_lookupReplyNew = getLong(props, "dbHistory.lookupReplyNew");
_lookupReplyOld = getLong(props, "dbHistory.lookupReplyOld");
//_successfulLookups = getLong(props, "dbHistory.successfulLookups");
//_failedLookups = getLong(props, "dbHistory.failedLookups");
//_lookupsReceived = getLong(props, "dbHistory.lookupsReceived");
//_lookupReplyDuplicate = getLong(props, "dbHistory.lookupReplyDuplicate");
//_lookupReplyInvalid = getLong(props, "dbHistory.lookupReplyInvalid");
//_lookupReplyNew = getLong(props, "dbHistory.lookupReplyNew");
//_lookupReplyOld = getLong(props, "dbHistory.lookupReplyOld");
_unpromptedDbStoreNew = getLong(props, "dbHistory.unpromptedDbStoreNew");
_unpromptedDbStoreOld = getLong(props, "dbHistory.unpromptedDbStoreOld");
_lastLookupReceived = getLong(props, "dbHistory.lastLookupReceived");
_avgDelayBetweenLookupsReceived = getLong(props, "dbHistory.avgDelayBetweenLookupsReceived");
//_lastLookupReceived = getLong(props, "dbHistory.lastLookupReceived");
//_avgDelayBetweenLookupsReceived = getLong(props, "dbHistory.avgDelayBetweenLookupsReceived");
// following 4 weren't persisted until 0.9.24
_lastLookupSuccessful = getLong(props, "dbHistory.lastLookupSuccessful");
_lastLookupFailed = getLong(props, "dbHistory.lastLookupFailed");
_lastStoreSuccessful = getLong(props, "dbHistory.lastStoreSuccessful");
_lastStoreFailed = getLong(props, "dbHistory.lastStoreFailed");
try {
_failedLookupRate.load(props, "dbHistory.failedLookupRate", true);
_log.debug("Loading dbHistory.failedLookupRate");
@ -266,7 +302,7 @@ public class DBHistory {
}
}
private void createRates(String statGroup) {
private synchronized void createRates(String statGroup) {
if (_failedLookupRate == null)
_failedLookupRate = new RateStat("dbHistory.failedLookupRate", "How often does this peer to respond to a lookup?", statGroup, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
if (_invalidReplyRate == null)
@ -276,14 +312,6 @@ public class DBHistory {
}
private final static long getLong(Properties props, String key) {
String val = props.getProperty(key);
if (val != null) {
try {
return Long.parseLong(val);
} catch (NumberFormatException nfe) {
return 0;
}
}
return 0;
return ProfilePersistenceHelper.getLong(props, key);
}
}

View File

@ -24,6 +24,9 @@ import net.i2p.util.Log;
* Once it becomes necessary, we can simply compact the poorly performing profiles
* (keeping only the most basic data) and maintain hundreds of thousands of profiles
* in memory. Beyond that size, we can simply eject the peers (e.g. keeping the best 100,000).
*
* TODO most of the methods should be synchronized.
*
*/
public class PeerProfile {
@ -46,9 +49,10 @@ public class PeerProfile {
private RateStat _tunnelTestResponseTime;
private RateStat _dbIntroduction;
// calculation bonuses
private long _speedBonus;
private long _capacityBonus;
private long _integrationBonus;
// ints to save some space
private int _speedBonus;
private int _capacityBonus;
private int _integrationBonus;
// calculation values
private double _speedValue;
private double _capacityValue;
@ -81,6 +85,8 @@ public class PeerProfile {
}
/**
* Caller should call setLastHeardAbout() and setFirstHeardAbout()
*
* @param peer non-null
*/
public PeerProfile(RouterContext context, Hash peer) {
@ -88,15 +94,18 @@ public class PeerProfile {
}
/**
* Caller should call setLastHeardAbout() and setFirstHeardAbout()
*
* @param peer non-null
* @param expand must be true (see below)
*/
private PeerProfile(RouterContext context, Hash peer, boolean expand) {
_context = context;
_log = context.logManager().getLog(PeerProfile.class);
if (peer == null)
throw new NullPointerException();
_context = context;
_log = context.logManager().getLog(PeerProfile.class);
_peer = peer;
_firstHeardAbout = _context.clock().now();
// this is always true, and there are several places in the router that will NPE
// if it is false, so all need to be fixed before we can have non-expanded profiles
if (expand)
@ -191,14 +200,36 @@ public class PeerProfile {
/**
* When did we first hear about this peer?
* Currently unused, candidate for removal.
* @return greater than zero, set to now in constructor
*/
public long getFirstHeardAbout() { return _firstHeardAbout; }
public void setFirstHeardAbout(long when) { _firstHeardAbout = when; }
public synchronized long getFirstHeardAbout() { return _firstHeardAbout; }
/** when did we last hear about this peer? */
public long getLastHeardAbout() { return _lastHeardAbout; }
public void setLastHeardAbout(long when) { _lastHeardAbout = when; }
/**
* Set when we first heard about this peer, applied only if earlier than the current value.
* Package private, only set by profile management subsystem.
*/
synchronized void setFirstHeardAbout(long when) {
if (when < _firstHeardAbout)
_firstHeardAbout = when;
}
/**
* when did we last hear about this peer?
* @return 0 if unset
*/
public synchronized long getLastHeardAbout() { return _lastHeardAbout; }
/**
* Set when we last heard about this peer, applied only if unset or newer than the current value.
* Also sets FirstHeardAbout if earlier
*/
public synchronized void setLastHeardAbout(long when) {
if (_lastHeardAbout <= 0 || when > _lastHeardAbout)
_lastHeardAbout = when;
// this is called by netdb PersistentDataStore, so fixup first heard
if (when < _firstHeardAbout)
_firstHeardAbout = when;
}
/** when did we last send to this peer successfully? */
public long getLastSendSuccessful() { return _lastSentToSuccessfully; }
@ -244,24 +275,24 @@ public class PeerProfile {
* written to disk to affect how the algorithm ranks speed. Negative values are
* penalties
*/
public long getSpeedBonus() { return _speedBonus; }
public void setSpeedBonus(long bonus) { _speedBonus = bonus; }
public int getSpeedBonus() { return _speedBonus; }
public void setSpeedBonus(int bonus) { _speedBonus = bonus; }
/**
* extra factor added to the capacity ranking - this can be updated in the profile
* written to disk to affect how the algorithm ranks capacity. Negative values are
* penalties
*/
public long getCapacityBonus() { return _capacityBonus; }
public void setCapacityBonus(long bonus) { _capacityBonus = bonus; }
public int getCapacityBonus() { return _capacityBonus; }
public void setCapacityBonus(int bonus) { _capacityBonus = bonus; }
/**
* extra factor added to the integration ranking - this can be updated in the profile
* written to disk to affect how the algorithm ranks integration. Negative values are
* penalties
*/
public long getIntegrationBonus() { return _integrationBonus; }
public void setIntegrationBonus(long bonus) { _integrationBonus = bonus; }
public int getIntegrationBonus() { return _integrationBonus; }
public void setIntegrationBonus(int bonus) { _integrationBonus = bonus; }
/**
* How fast is the peer, taking into consideration both throughput and latency.
@ -429,7 +460,7 @@ public class PeerProfile {
* repeatedly
*
*/
public void expandProfile() {
public synchronized void expandProfile() {
String group = (null == _peer ? "profileUnknown" : _peer.toBase64().substring(0,6));
//if (_sendSuccessSize == null)
// _sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 5*60*1000l, 60*60*1000l });

View File

@ -222,8 +222,8 @@ public class ProfileManagerImpl implements ProfileManager {
data.setLastHeardFrom(_context.clock().now());
if (!data.getIsExpandedDB())
return;
DBHistory hist = data.getDBHistory();
hist.lookupReceived();
//DBHistory hist = data.getDBHistory();
//hist.lookupReceived();
}
/**
@ -313,8 +313,7 @@ public class ProfileManagerImpl implements ProfileManager {
public void heardAbout(Hash peer, long when) {
PeerProfile data = getProfile(peer);
//if (data == null) return;
if (when > data.getLastHeardAbout())
data.setLastHeardAbout(when);
data.setLastHeardAbout(when);
}
/**
@ -340,7 +339,6 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile prof = _context.profileOrganizer().getProfile(peer);
if (prof == null) {
prof = new PeerProfile(_context, peer);
prof.setFirstHeardAbout(_context.clock().now());
_context.profileOrganizer().addProfile(prof);
}
return prof;

View File

@ -48,12 +48,12 @@ class ProfilePersistenceHelper {
private static final String B64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-~";
/**
* If we haven't been able to get a message through to the peer in 3 days,
* If we haven't been able to get a message through to the peer in this much time,
* drop the profile. They may reappear, but if they do, their config may
* have changed (etc).
*
*/
private static final long EXPIRE_AGE = 3*24*60*60*1000;
private static final long EXPIRE_AGE = 15*24*60*60*1000;
private final File _profileDir;
private Hash _us;
@ -263,9 +263,9 @@ class ProfilePersistenceHelper {
file.delete();
}
profile.setCapacityBonus(getLong(props, "capacityBonus"));
profile.setIntegrationBonus(getLong(props, "integrationBonus"));
profile.setSpeedBonus(getLong(props, "speedBonus"));
profile.setCapacityBonus((int) getLong(props, "capacityBonus"));
profile.setIntegrationBonus((int) getLong(props, "integrationBonus"));
profile.setSpeedBonus((int) getLong(props, "speedBonus"));
profile.setLastHeardAbout(getLong(props, "lastHeardAbout"));
profile.setFirstHeardAbout(getLong(props, "firstHeardAbout"));
@ -282,10 +282,10 @@ class ProfilePersistenceHelper {
// In the interest of keeping the in-memory profiles small,
// don't load the DB info at all unless there is something interesting there
// (i.e. floodfills)
// It seems like we do one or two lookups as a part of handshaking?
// Not sure, to be researched.
if (getLong(props, "dbHistory.successfulLookups") > 1 ||
getLong(props, "dbHistory.failedlLokups") > 1) {
if (getLong(props, "dbHistory.lastLookupSuccessful") > 0 ||
getLong(props, "dbHistory.lastLookupFailed") > 0 ||
getLong(props, "dbHistory.lastStoreSuccessful") > 0 ||
getLong(props, "dbHistory.lastStoreFailed") > 0) {
profile.expandDBProfile();
profile.getDBHistory().load(props);
profile.getDbIntroduction().load(props, "dbIntroduction", true);
@ -300,6 +300,7 @@ class ProfilePersistenceHelper {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Loaded the profile for " + peer.toBase64() + " from " + file.getName());
fixupFirstHeardAbout(profile);
return profile;
} catch (IOException e) {
if (_log.shouldLog(Log.WARN))
@ -309,7 +310,60 @@ class ProfilePersistenceHelper {
}
}
private final static long getLong(Properties props, String key) {
/**
 * First heard about wasn't always set correctly before,
 * set it to the minimum of all recorded timestamps.
 *
 * Only lowers the profile's firstHeardAbout value, never raises it
 * (the setter itself applies the change only if the new time is earlier).
 *
 * @param p the just-loaded profile to fix up; must be non-null
 * @since 0.9.24
 */
private void fixupFirstHeardAbout(PeerProfile p) {
// find the earliest positive timestamp recorded anywhere in the profile
long min = Long.MAX_VALUE;
long t = p.getLastHeardAbout();
if (t > 0 && t < min) min = t;
t = p.getLastSendSuccessful();
if (t > 0 && t < min) min = t;
t = p.getLastSendFailed();
if (t > 0 && t < min) min = t;
t = p.getLastHeardFrom();
if (t > 0 && t < min) min = t;
// the first was never used and the last 4 were never persisted
//DBHistory dh = p.getDBHistory();
//if (dh != null) {
// t = dh.getLastLookupReceived();
// if (t > 0 && t < min) min = t;
// t = dh.getLastLookupSuccessful();
// if (t > 0 && t < min) min = t;
// t = dh.getLastLookupFailed();
// if (t > 0 && t < min) min = t;
// t = dh.getLastStoreSuccessful();
// if (t > 0 && t < min) min = t;
// t = dh.getLastStoreFailed();
// if (t > 0 && t < min) min = t;
//}
// tunnel history timestamps are persisted, so include them in the scan
TunnelHistory th = p.getTunnelHistory();
if (th != null) {
t = th.getLastAgreedTo();
if (t > 0 && t < min) min = t;
t = th.getLastRejectedCritical();
if (t > 0 && t < min) min = t;
t = th.getLastRejectedBandwidth();
if (t > 0 && t < min) min = t;
t = th.getLastRejectedTransient();
if (t > 0 && t < min) min = t;
t = th.getLastRejectedProbabalistic();
if (t > 0 && t < min) min = t;
t = th.getLastFailed();
if (t > 0 && t < min) min = t;
}
// apply only when we found something earlier than (or replacing an unset) FHA
long fha = p.getFirstHeardAbout();
if (min > 0 && min < Long.MAX_VALUE && (fha <= 0 || min < fha)) {
p.setFirstHeardAbout(min);
if (_log.shouldDebug())
_log.debug("Fixed up the FHA time for " + p.getPeer().toBase64() + " to " + (new Date(min)));
}
}
static long getLong(Properties props, String key) {
String val = props.getProperty(key);
if (val != null) {
try {

View File

@ -89,10 +89,10 @@ public class TunnelHistory {
_lifetimeRejected.incrementAndGet();
if (severity >= TUNNEL_REJECT_CRIT) {
_lastRejectedCritical = _context.clock().now();
_rejectRate.addData(1, 1);
_rejectRate.addData(1);
} else if (severity >= TUNNEL_REJECT_BANDWIDTH) {
_lastRejectedBandwidth = _context.clock().now();
_rejectRate.addData(1, 1);
_rejectRate.addData(1);
} else if (severity >= TUNNEL_REJECT_TRANSIENT_OVERLOAD) {
_lastRejectedTransient = _context.clock().now();
// dont increment the reject rate in this case
@ -108,7 +108,7 @@ public class TunnelHistory {
*/
public void incrementFailed(int pct) {
_lifetimeFailed.incrementAndGet();
_failRate.addData(pct, 1);
_failRate.addData(pct);
_lastFailed = _context.clock().now();
}
@ -190,14 +190,6 @@ public class TunnelHistory {
}
private final static long getLong(Properties props, String key) {
String val = props.getProperty(key);
if (val != null) {
try {
return Long.parseLong(val);
} catch (NumberFormatException nfe) {
return 0;
}
}
return 0;
return ProfilePersistenceHelper.getLong(props, key);
}
}