Profiles: Don't decay during first 90 minutes of uptime

Change decay from .75 twice a day to .84 four times a day;
approximately the same overall decay per day (~0.5)
Parameterize decay variables for clarity
Fix multiple NPEs in ProfileOrganizer CLI
Other cleanups
Author: zzz
Date:   2020-03-24 13:03:40 +00:00
Parent: 7654a0af42
Commit: f2787a8df6
5 changed files with 84 additions and 35 deletions
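
For reference, a minimal standalone sketch (not part of this commit) of the arithmetic behind the change: the old scheme multiplied the peak throughput values by .75 roughly twice a day, while the new scheme multiplies by the fourth root of .5 roughly four times a day, so both land near a 50% cut per day.

    // Sketch only: the decay arithmetic described in the commit message.
    public class DecayMath {
        public static void main(String[] args) {
            double oldDaily = Math.pow(0.75, 2);        // old: .75 twice a day      -> ~0.5625
            double newFactor = Math.pow(0.5, 1.0 / 4);  // new per-decay factor      -> ~0.8409
            double newDaily = Math.pow(newFactor, 4);   // new: .84 four times a day -> 0.5
            System.out.println("old daily decay:    " + oldDaily);
            System.out.println("new per-decay step: " + newFactor);
            System.out.println("new daily decay:    " + newDaily);
        }
    }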

net/i2p/router/peermanager/CapacityCalculator.java

@ -2,6 +2,7 @@ package net.i2p.router.peermanager;
import net.i2p.data.router.RouterInfo;
import net.i2p.router.RouterContext;
+import net.i2p.router.NetworkDatabaseFacade;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.stat.Rate;
import net.i2p.stat.RateAverages;
@ -91,9 +92,13 @@ class CapacityCalculator {
// credit non-floodfill to reduce conn limit issues at floodfills
// TODO only if we aren't floodfill ourselves?
-RouterInfo ri = context.netDb().lookupRouterInfoLocally(profile.getPeer());
-if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri))
-    capacity += BONUS_NON_FLOODFILL;
+// null for tests
+NetworkDatabaseFacade ndb = context.netDb();
+if (ndb != null) {
+    RouterInfo ri = ndb.lookupRouterInfoLocally(profile.getPeer());
+    if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri))
+        capacity += BONUS_NON_FLOODFILL;
+}
// a tiny tweak to break ties and encourage closeness, -.25 to +.25
capacity -= profile.getXORDistance() * (BONUS_XOR / 128);

net/i2p/router/peermanager/PeerManager.java

@ -55,6 +55,8 @@ class PeerManager {
* Rate contained in the profile, as the Rates must be coalesced.
*/
private static final long REORGANIZE_TIME_LONG = 351*1000;
+/** After first two hours of uptime ~= 246 */
+static final int REORGANIZES_PER_DAY = (int) (24*60*60*1000L / REORGANIZE_TIME_LONG);
private static final long STORE_TIME = 19*60*60*1000;
private static final long EXPIRE_AGE = 3*24*60*60*1000;
@ -119,8 +121,10 @@ class PeerManager {
public void run() {
long start = System.currentTimeMillis();
+long uptime = _context.router().getUptime();
+boolean shouldDecay = uptime > 90*60*1000;
try {
-    _organizer.reorganize(true);
+    _organizer.reorganize(true, shouldDecay);
} catch (Throwable t) {
_log.log(Log.CRIT, "Error evaluating profiles", t);
}
@ -138,7 +142,6 @@ class PeerManager {
_log.log(Log.CRIT, "Error storing profiles", t);
}
}
-long uptime = _context.router().getUptime();
long delay;
if (orgtime > 1000 || uptime > 2*60*60*1000)
delay = REORGANIZE_TIME_LONG;
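
A standalone sketch (constants copied from the hunk above; the 45-minute uptime is just an illustrative value) of how the reorganize cadence and the new 90-minute gate fit together: once the interval settles at 351 seconds there are about 246 reorganizations per day, and none of them decay the profile until uptime passes 90 minutes.

    // Sketch only: reorganize cadence and the 90-minute decay gate.
    public class ReorgCadence {
        public static void main(String[] args) {
            final long REORGANIZE_TIME_LONG = 351 * 1000L;   // 5 min 51 sec, as above
            // the first two hours use a shorter interval, so ~246 is the steady-state figure
            int reorganizesPerDay = (int) (24 * 60 * 60 * 1000L / REORGANIZE_TIME_LONG);
            System.out.println("reorganizations per day: " + reorganizesPerDay);   // 246

            long uptime = 45 * 60 * 1000L;                   // illustrative: 45 minutes of uptime
            boolean shouldDecay = uptime > 90 * 60 * 1000L;
            System.out.println("decay enabled at 45 min: " + shouldDecay);         // false
        }
    }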

net/i2p/router/peermanager/PeerProfile.java

@ -7,6 +7,7 @@ import java.util.HashSet;
import java.util.Set;
import net.i2p.data.Hash;
+import net.i2p.router.CommSystemFacade;
import net.i2p.router.RouterContext;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;
@ -86,8 +87,15 @@ public class PeerProfile {
/** total number of bytes pushed through a single tunnel in a 1 minute period */
private final float _peakTunnel1mThroughput[] = new float[THROUGHPUT_COUNT];
/** periodically cut the measured throughput values */
-private static final int DROP_PERIOD_MINUTES = 120;
-private static final float DEGRADE_FACTOR = 0.75f;
+private static final int DEGRADES_PER_DAY = 4;
+// one in this many times, ~= 61
+private static final int DEGRADE_PROBABILITY = PeerManager.REORGANIZES_PER_DAY / DEGRADES_PER_DAY;
+private static final double TOTAL_DEGRADE_PER_DAY = 0.5d;
+// the goal is to cut an unchanged profile in half in 24 hours.
+// x**4 = .5; x = 4th root of .5, x = .5**(1/4), x ~= 0.84
+private static final float DEGRADE_FACTOR = (float) Math.pow(TOTAL_DEGRADE_PER_DAY, 1.0d / DEGRADES_PER_DAY);
+//static { System.out.println("Degrade factor is " + DEGRADE_FACTOR); }
private long _lastCoalesceDate = System.currentTimeMillis();
/**
@ -160,21 +168,33 @@ public class PeerProfile {
/** @since 0.8.11 */
boolean isEstablished() {
-return _context.commSystem().isEstablished(_peer);
+// null for tests
+CommSystemFacade cs = _context.commSystem();
+if (cs == null)
+    return false;
+return cs.isEstablished(_peer);
}
/** @since 0.8.11 */
boolean wasUnreachable() {
-return _context.commSystem().wasUnreachable(_peer);
+// null for tests
+CommSystemFacade cs = _context.commSystem();
+if (cs == null)
+    return false;
+return cs.wasUnreachable(_peer);
}
/** @since 0.8.11 */
boolean isSameCountry() {
-String us = _context.commSystem().getOurCountry();
+// null for tests
+CommSystemFacade cs = _context.commSystem();
+if (cs == null)
+    return false;
+String us = cs.getOurCountry();
return us != null &&
(_bigCountries.contains(us) ||
_context.getProperty(CapacityCalculator.PROP_COUNTRY_BONUS) != null) &&
-us.equals(_context.commSystem().getCountry(_peer));
+us.equals(cs.getCountry(_peer));
}
/**
@ -212,7 +232,7 @@ public class PeerProfile {
long before = _context.clock().now() - period;
return getLastHeardFrom() < before ||
getLastSendSuccessful() < before ||
-_context.commSystem().isEstablished(_peer);
+isEstablished();
}
@ -437,6 +457,8 @@ public class PeerProfile {
}
/**
+* This is the speed value
+*
* @return the average of the three fastest one-minute data transfers, on a per-tunnel basis,
* through this peer. Ever. Except that the peak values are cut in half
* periodically by coalesceThroughput().
@ -523,10 +545,12 @@ public class PeerProfile {
_expandedDB = true;
}
-private void coalesceThroughput() {
+private void coalesceThroughput(boolean decay) {
long now = System.currentTimeMillis();
long measuredPeriod = now - _lastCoalesceDate;
if (measuredPeriod >= 60*1000) {
+// so we don't call random() twice
+boolean shouldDecay = decay && _context.random().nextInt(DEGRADE_PROBABILITY) <= 0;
long tot = _peakThroughputCurrentTotal;
float lowPeak = _peakThroughput[THROUGHPUT_COUNT-1];
if (tot > lowPeak) {
@ -539,7 +563,7 @@ public class PeerProfile {
}
}
} else {
-if (_context.random().nextInt(DROP_PERIOD_MINUTES) <= 0) {
+if (shouldDecay) {
for (int i = 0; i < THROUGHPUT_COUNT; i++)
_peakThroughput[i] *= DEGRADE_FACTOR;
}
@ -547,7 +571,7 @@ public class PeerProfile {
// we degrade the tunnel throughput here too, regardless of the current
// activity
-if (_context.random().nextInt(DROP_PERIOD_MINUTES) <= 0) {
+if (shouldDecay) {
for (int i = 0; i < THROUGHPUT_COUNT; i++) {
_peakTunnelThroughput[i] *= DEGRADE_FACTOR;
_peakTunnel1mThroughput[i] *= DEGRADE_FACTOR;
@ -558,11 +582,13 @@ public class PeerProfile {
}
}
-/** update the stats and rates (this should be called once a minute) */
-public void coalesceStats() {
+/**
+ * Update the stats and rates. This is only called by addProfile()
+ */
+void coalesceStats() {
if (!_expanded) return;
-coalesceOnly();
+coalesceOnly(false);
updateValues();
if (_log.shouldLog(Log.DEBUG))
@ -573,7 +599,7 @@ public class PeerProfile {
* Caller must next call updateValues()
* @since 0.9.4
*/
-void coalesceOnly() {
+void coalesceOnly(boolean shouldDecay) {
_coalescing = true;
//_receiveSize.coalesceStats();
@ -587,7 +613,7 @@ public class PeerProfile {
_dbHistory.coalesceStats();
}
-coalesceThroughput();
+coalesceThroughput(shouldDecay);
_speedValueNew = calculateSpeed();
_capacityValueNew = calculateCapacity();
@ -604,7 +630,7 @@ public class PeerProfile {
*/
void updateValues() {
if (!_coalescing) // can happen
-coalesceOnly();
+coalesceOnly(false);
_coalescing = false;
_speedValue = _speedValueNew;
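
Tying the new constants together, a standalone sketch (the starting value of 1000 is arbitrary) of the expected behaviour: each reorganize pass decays with probability 1/DEGRADE_PROBABILITY (about 1 in 61), there are about 246 passes per day, so an otherwise idle profile typically sees four decays of ~0.84 and ends the day near half its starting value.

    // Sketch only: expected effect of the probabilistic decay over one day.
    import java.util.Random;

    public class DecaySimulation {
        public static void main(String[] args) {
            int reorganizesPerDay = 246;                                        // ~ one pass per 351 s
            int degradesPerDay = 4;
            int degradeProbability = reorganizesPerDay / degradesPerDay;        // ~61
            float degradeFactor = (float) Math.pow(0.5, 1.0 / degradesPerDay);  // ~0.84

            Random rnd = new Random();
            float peak = 1000.0f;                                               // arbitrary starting value
            for (int pass = 0; pass < reorganizesPerDay; pass++) {
                if (rnd.nextInt(degradeProbability) <= 0)                       // ~1-in-61 per pass
                    peak *= degradeFactor;
            }
            System.out.println("peak after one simulated day: " + peak);        // typically near 500
        }
    }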

net/i2p/router/peermanager/ProfileOrganizer.java

@ -21,7 +21,9 @@ import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.router.RouterAddress;
import net.i2p.data.router.RouterInfo;
+import net.i2p.router.ClientManagerFacade;
import net.i2p.router.NetworkDatabaseFacade;
+import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.router.tunnel.pool.TunnelPeerSelector;
import net.i2p.router.util.MaskedIPSet;
@ -782,9 +784,9 @@ public class ProfileOrganizer {
* this method, but the averages are recalculated.
*
*/
-void reorganize() { reorganize(false); }
+void reorganize() { reorganize(false, false); }
-void reorganize(boolean shouldCoalesce) {
+void reorganize(boolean shouldCoalesce, boolean shouldDecay) {
long sortTime = 0;
int coalesceTime = 0;
long thresholdTime = 0;
@ -792,7 +794,9 @@ public class ProfileOrganizer {
int profileCount = 0;
int expiredCount = 0;
-long uptime = _context.router().getUptime();
+// null for main()
+Router r = _context.router();
+long uptime = (r != null) ? r.getUptime() : 0L;
long expireOlderThan = -1;
if (uptime > 60*60*1000) {
// dynamically adjust expire time to control memory usage
@ -807,14 +811,14 @@ public class ProfileOrganizer {
if (shouldCoalesce) {
getReadLock();
try {
+long coalesceStart = System.currentTimeMillis();
for (PeerProfile prof : _strictCapacityOrder) {
if ( (expireOlderThan > 0) && (prof.getLastSendSuccessful() <= expireOlderThan) ) {
continue;
}
-long coalesceStart = System.currentTimeMillis();
-prof.coalesceOnly();
-coalesceTime += (int)(System.currentTimeMillis()-coalesceStart);
+prof.coalesceOnly(shouldDecay);
}
+coalesceTime = (int)(System.currentTimeMillis()-coalesceStart);
} finally {
releaseReadLock();
}
@ -1394,7 +1398,8 @@ public class ProfileOrganizer {
// don't allow them in the high-cap pool, what would the point of that be?
if (_thresholdCapacityValue <= profile.getCapacityValue() &&
isSelectable(peer) &&
-!_context.commSystem().isInStrictCountry(peer)) {
+// null for tests
+(_context.commSystem() == null || !_context.commSystem().isInStrictCountry(peer))) {
_highCapacityPeers.put(peer, profile);
if (_log.shouldLog(Log.DEBUG))
_log.debug("High capacity: \t" + peer);
@ -1446,8 +1451,12 @@ public class ProfileOrganizer {
* @return minimum number of peers to be placed in the 'fast' group
*/
protected int getMinimumFastPeers() {
+// null for main()
+ClientManagerFacade cm = _context.clientManager();
+if (cm == null)
+    return DEFAULT_MAXIMUM_FAST_PEERS;
int def = Math.min(DEFAULT_MAXIMUM_FAST_PEERS,
-    (6 *_context.clientManager().listClients().size()) + DEFAULT_MINIMUM_FAST_PEERS - 2);
+    (6 * cm.listClients().size()) + DEFAULT_MINIMUM_FAST_PEERS - 2);
return _context.getProperty(PROP_MINIMUM_FAST_PEERS, def);
}
@ -1484,6 +1493,11 @@ public class ProfileOrganizer {
* </pre>
*/
public static void main(String args[]) {
+if (args.length <= 0) {
+    System.err.println("Usage: profileorganizer file.txt.gz [file2.txt.gz] ...");
+    System.exit(1);
+}
RouterContext ctx = new RouterContext(null); // new net.i2p.router.Router());
ProfileOrganizer organizer = new ProfileOrganizer(ctx);
organizer.setUs(Hash.FAKE_HASH);
@ -1497,27 +1511,26 @@ public class ProfileOrganizer {
organizer.addProfile(profile);
}
organizer.reorganize();
-DecimalFormat fmt = new DecimalFormat("0,000.0");
-fmt.setPositivePrefix("+");
+DecimalFormat fmt = new DecimalFormat("0000.0");
for (Hash peer : organizer.selectAllPeers()) {
PeerProfile profile = organizer.getProfile(peer);
if (!profile.getIsActive()) {
-System.out.println("Peer " + profile.getPeer().toBase64().substring(0,4)
+System.out.println("Peer " + peer.toBase64().substring(0,4)
+ " [" + (organizer.isFast(peer) ? "IF+R" :
organizer.isHighCapacity(peer) ? "IR " :
organizer.isFailing(peer) ? "IX " : "I ") + "]: "
-+ "\t Speed:\t" + fmt.format(profile.getSpeedValue())
++ " Speed:\t" + fmt.format(profile.getSpeedValue())
+ " Capacity:\t" + fmt.format(profile.getCapacityValue())
+ " Integration:\t" + fmt.format(profile.getIntegrationValue())
+ " Active?\t" + profile.getIsActive()
+ " Failing?\t" + profile.getIsFailing());
} else {
-System.out.println("Peer " + profile.getPeer().toBase64().substring(0,4)
+System.out.println("Peer " + peer.toBase64().substring(0,4)
+ " [" + (organizer.isFast(peer) ? "F+R " :
organizer.isHighCapacity(peer) ? "R " :
organizer.isFailing(peer) ? "X " : " ") + "]: "
-+ "\t Speed:\t" + fmt.format(profile.getSpeedValue())
++ " Speed:\t" + fmt.format(profile.getSpeedValue())
+ " Capacity:\t" + fmt.format(profile.getCapacityValue())
+ " Integration:\t" + fmt.format(profile.getIntegrationValue())
+ " Active?\t" + profile.getIsActive()
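
With the guards above in place, the CLI entry point can be exercised without a live router; a hedged sketch of an invocation (the profile file name below is hypothetical):

    // Sketch only: driving the CLI from Java; the file name is made up.
    public class RunProfileOrganizer {
        public static void main(String[] args) throws Exception {
            // equivalent to passing profile file arguments on the command line
            net.i2p.router.peermanager.ProfileOrganizer.main(
                    new String[] { "profile-abcd1234.txt.gz" });
        }
    }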

net/i2p/router/peermanager/ProfilePersistenceHelper.java

@ -458,6 +458,8 @@ class ProfilePersistenceHelper {
}
private Hash getHash(String name) {
+if (name.length() < PREFIX.length() + 44)
+    return null;
String key = name.substring(PREFIX.length());
key = key.substring(0, 44);
//Hash h = new Hash();
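
Finally, a short standalone sketch of why the added length check matters (the "profile-" prefix and the sample names are assumptions for illustration): a profile file name needs at least PREFIX.length() + 44 characters to hold a full 44-character base64 router hash, so shorter names are now rejected instead of throwing from substring().

    // Sketch only: the length guard added above; the PREFIX value is assumed.
    public class HashNameCheck {
        private static final String PREFIX = "profile-";        // assumed prefix

        static String getBase64Hash(String name) {
            if (name.length() < PREFIX.length() + 44)
                return null;                                     // too short to contain a 44-char hash
            return name.substring(PREFIX.length(), PREFIX.length() + 44);
        }

        public static void main(String[] args) {
            System.out.println(getBase64Hash("profile-short.txt.gz"));                  // null
            System.out.println(getBase64Hash("profile-" + "A".repeat(44) + ".txt.gz")); // 44 A's (Java 11+)
        }
    }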