* i2psnark:
  - Add kbucket debugging
  - Eliminate redundant explore keys
  - Add more limits to DHT tracker
  - Delay expiration at startup
  - Only enable updates for dev builds and 1% of release builds
* Update Manager: Warn on dup registration
Author: zzz
Date: 2012-10-29 22:08:38 +00:00
Parent: 2b80d450fa
Commit: 05d299816b
5 changed files with 64 additions and 12 deletions

@@ -526,11 +526,17 @@ public class KBucketSet<T extends SimpleDataStructure> {
     public List<T> getExploreKeys(long age) {
         List<T> rv = new ArrayList(_buckets.size());
         long old = _context.clock().now() - age;
+        int prevSize = -1;
         getReadLock();
         try {
             for (KBucket b : _buckets) {
-                if (b.getLastChanged() < old || b.getKeyCount() < BUCKET_SIZE * 3 / 4)
+                int curSize = b.getKeyCount();
+                // The first few buckets are all empty, we only need one
+                // explore key for all of them.
+                if ((prevSize != 0 || curSize != 0) &&
+                    (b.getLastChanged() < old || curSize < BUCKET_SIZE * 3 / 4))
                     rv.add(generateRandomKey(b));
+                prevSize = curSize;
             }
         } finally { releaseReadLock(); }
         return rv;

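The explore-key change above generates at most one random key for a run of consecutive empty buckets: an empty bucket is skipped whenever the previous bucket was also empty. A standalone sketch of just that dedup rule, with illustrative bucket sizes and class name that are not part of the commit (the real check additionally requires the bucket to be stale or under-filled before a key is generated):

public class ExploreKeyDedupSketch {
    public static void main(String[] args) {
        // Hypothetical per-bucket key counts; the first few buckets of a fresh
        // routing table are typically all empty.
        int[] bucketSizes = {0, 0, 0, 5, 0, 12, 0, 0};
        int prevSize = -1;
        for (int i = 0; i < bucketSizes.length; i++) {
            int curSize = bucketSizes[i];
            // Mirrors (prevSize != 0 || curSize != 0) in getExploreKeys():
            // only the first empty bucket of a run is eligible for an explore key.
            boolean eligible = (prevSize != 0 || curSize != 0);
            System.out.println("bucket " + i + " (size " + curSize + "): " +
                               (eligible ? "explore key" : "skipped as redundant"));
            prevSize = curSize;
        }
    }
}

With the sizes above, buckets 1, 2 and 7 are skipped, so the three leading empty buckets cost one explore lookup instead of three.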

@@ -69,6 +69,9 @@ class DHTNodes {
 
     // begin ConcurrentHashMap methods
 
+    /**
+     *  @return known nodes, not total net size
+     */
     public int size() {
         return _nodeMap.size();
     }
@@ -128,11 +131,19 @@ class DHTNodes {
         return _kad.getExploreKeys(MAX_BUCKET_AGE);
     }
 
+    /**
+     *  Debug info, HTML formatted
+     *  @since 0.9.4
+     */
+    public void renderStatusHTML(StringBuilder buf) {
+        buf.append(_kad.toString().replace("\n", "<br>\n"));
+    }
+
     /** */
     private class Cleaner extends SimpleTimer2.TimedEvent {
 
         public Cleaner() {
-            super(SimpleTimer2.getInstance(), CLEAN_TIME);
+            super(SimpleTimer2.getInstance(), 5 * CLEAN_TIME);
         }
 
         public void timeReached() {

@@ -39,6 +39,8 @@ class DHTTracker {
     private static final long DELTA_EXPIRE_TIME = 3*60*1000;
     private static final int MAX_PEERS = 2000;
     private static final int MAX_PEERS_PER_TORRENT = 150;
+    private static final int ABSOLUTE_MAX_PER_TORRENT = MAX_PEERS_PER_TORRENT * 2;
+    private static final int MAX_TORRENTS = 400;
 
     DHTTracker(I2PAppContext ctx) {
         _context = ctx;
@@ -62,17 +64,29 @@ class DHTTracker {
             _log.debug("Announce " + hash + " for " + ih);
         Peers peers = _torrents.get(ih);
         if (peers == null) {
+            if (_torrents.size() >= MAX_TORRENTS)
+                return;
             peers = new Peers();
             Peers peers2 = _torrents.putIfAbsent(ih, peers);
             if (peers2 != null)
                 peers = peers2;
         }
 
-        Peer peer = new Peer(hash.getData());
-        Peer peer2 = peers.putIfAbsent(peer, peer);
-        if (peer2 != null)
-            peer = peer2;
-        peer.setLastSeen(_context.clock().now());
+        if (peers.size() < ABSOLUTE_MAX_PER_TORRENT) {
+            Peer peer = new Peer(hash.getData());
+            Peer peer2 = peers.putIfAbsent(peer, peer);
+            if (peer2 != null)
+                peer = peer2;
+            peer.setLastSeen(_context.clock().now());
+        } else {
+            // We could update setLastSeen if he is already
+            // in there, but that would tend to keep
+            // the same set of peers.
+            // So let it expire so new ones can come in.
+            //Peer peer = peers.get(hash);
+            //if (peer != null)
+            //    peer.setLastSeen(_context.clock().now());
+        }
     }
 
     void unannounce(InfoHash ih, Hash hash) {
@@ -113,7 +127,7 @@ class DHTTracker {
     private class Cleaner extends SimpleTimer2.TimedEvent {
 
         public Cleaner() {
-            super(SimpleTimer2.getInstance(), CLEAN_TIME);
+            super(SimpleTimer2.getInstance(), 2 * CLEAN_TIME);
         }
 
         public void timeReached() {
@@ -122,6 +136,7 @@ class DHTTracker {
             long now = _context.clock().now();
             int torrentCount = 0;
             int peerCount = 0;
+            boolean tooMany = false;
             for (Iterator<Peers> iter = _torrents.values().iterator(); iter.hasNext(); ) {
                 Peers p = iter.next();
                 int recent = 0;
@@ -136,6 +151,7 @@ class DHTTracker {
                 }
                 if (recent > MAX_PEERS_PER_TORRENT) {
                     // too many, delete at random
+                    // TODO sort and remove oldest?
                     // TODO per-torrent adjustable expiration?
                     for (Iterator<Peer> iterp = p.values().iterator(); iterp.hasNext() && p.size() > MAX_PEERS_PER_TORRENT; ) {
                         iterp.next();
@@ -143,6 +159,7 @@ class DHTTracker {
                         peerCount--;
                     }
                     torrentCount++;
+                    tooMany = true;
                 } else if (recent <= 0) {
                     iter.remove();
                 } else {
@@ -151,6 +168,8 @@ class DHTTracker {
             }
 
             if (peerCount > MAX_PEERS)
+                tooMany = true;
+            if (tooMany)
                 _expireTime = Math.max(_expireTime - DELTA_EXPIRE_TIME, MIN_EXPIRE_TIME);
             else
                 _expireTime = Math.min(_expireTime + DELTA_EXPIRE_TIME, MAX_EXPIRE_TIME);
@@ -162,7 +181,7 @@ class DHTTracker {
                           DataHelper.formatDuration(_expireTime) + " expiration");
             _peerCount = peerCount;
             _torrentCount = torrentCount;
-            schedule(CLEAN_TIME);
+            schedule(tooMany ? CLEAN_TIME / 3 : CLEAN_TIME);
         }
     }
 }

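Taken together, the DHTTracker changes bound memory in two ways: announce() refuses new torrents past MAX_TORRENTS and new peers past ABSOLUTE_MAX_PER_TORRENT, while the Cleaner adapts the expiration window, shrinking it by DELTA_EXPIRE_TIME whenever a torrent stays over MAX_PEERS_PER_TORRENT or the total exceeds MAX_PEERS, and relaxing it otherwise. A standalone sketch of that adjustment; DELTA matches the diff, but the MIN/MAX values and class name here are illustrative placeholders (the real MIN_EXPIRE_TIME and MAX_EXPIRE_TIME are defined elsewhere in DHTTracker):

public class ExpireWindowSketch {
    static final long DELTA_EXPIRE_TIME = 3*60*1000;   // as in the diff
    static final long MIN_EXPIRE_TIME = 15*60*1000;    // placeholder for this sketch
    static final long MAX_EXPIRE_TIME = 60*60*1000;    // placeholder for this sketch

    // One cleaner pass: tighten under pressure, relax otherwise, clamped to [MIN, MAX].
    static long adjust(long expireTime, boolean tooMany) {
        return tooMany ? Math.max(expireTime - DELTA_EXPIRE_TIME, MIN_EXPIRE_TIME)
                       : Math.min(expireTime + DELTA_EXPIRE_TIME, MAX_EXPIRE_TIME);
    }

    public static void main(String[] args) {
        long expire = MAX_EXPIRE_TIME;
        for (int pass = 1; pass <= 5; pass++) {
            expire = adjust(expire, true);   // sustained overload ratchets toward MIN
            System.out.println("pass " + pass + ": expire " + (expire / 60000) + " min");
        }
    }
}

The peers.size() check in announce() is deliberately approximate, since it can race with concurrent announces, and the commented-out branch records the design choice not to refresh lastSeen once a torrent is over the hard cap, so the existing peer set ages out and makes room for new peers.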

@@ -613,6 +613,7 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
                    "Rcvd tokens: ").append(_incomingTokens.size()).append("<br>" +
                    "Pending queries: ").append(_sentQueries.size()).append("<br>");
         _tracker.renderStatusHTML(buf);
+        _knownNodes.renderStatusHTML(buf);
         return buf.toString();
     }
 
@@ -1518,7 +1519,7 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
     private class Cleaner extends SimpleTimer2.TimedEvent {
 
         public Cleaner() {
-            super(SimpleTimer2.getInstance(), CLEAN_TIME);
+            super(SimpleTimer2.getInstance(), 7 * CLEAN_TIME);
         }
 
         public void timeReached() {

@@ -558,10 +558,22 @@ public class ConsoleUpdateManager implements UpdateManager {
      *  Call once for each type/method pair.
      */
     public void register(Updater updater, UpdateType type, UpdateMethod method, int priority) {
+        // DEBUG slow start for snark updates
+        // For 0.9.4 update, only for dev builds
+        // For 0.9.5 update, only for dev builds and 1% more
+        // Remove this in 0.9.6 or 0.9.7
+        if (method == TORRENT && RouterVersion.BUILD == 0 && _context.random().nextInt(100) != 0) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Ignoring torrent registration");
+            return;
+        }
         RegisteredUpdater ru = new RegisteredUpdater(updater, type, method, priority);
         if (_log.shouldLog(Log.INFO))
             _log.info("Registering " + ru);
-        _registeredUpdaters.add(ru);
+        if (!_registeredUpdaters.add(ru)) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Duplicate registration " + ru);
+        }
     }
 
     public void unregister(Updater updater, UpdateType type, UpdateMethod method) {
@@ -575,7 +587,10 @@ public class ConsoleUpdateManager implements UpdateManager {
         RegisteredChecker rc = new RegisteredChecker(updater, type, method, priority);
         if (_log.shouldLog(Log.INFO))
             _log.info("Registering " + rc);
-        _registeredCheckers.add(rc);
+        if (!_registeredCheckers.add(rc)) {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Duplicate registration " + rc);
+        }
     }
 
     public void unregister(Checker updater, UpdateType type, UpdateMethod method) {
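
The new gate in register() enables torrent-based updates for every dev build (RouterVersion.BUILD != 0) and for roughly 1% of release builds, those where nextInt(100) happens to return 0; the duplicate warnings work because the backing collections are sets whose add() returns false when an equal RegisteredUpdater or RegisteredChecker is already present. A standalone sketch of the gating odds, with an illustrative class and method name that are not part of the commit:

import java.util.Random;

public class TorrentUpdateGateSketch {
    // Mirrors the condition in register(): the registration is ignored when
    // this is a release build (build == 0) and the 1-in-100 draw misses.
    static boolean allowTorrentUpdate(int build, Random random) {
        return !(build == 0 && random.nextInt(100) != 0);
    }

    public static void main(String[] args) {
        Random rnd = new Random();
        int trials = 100000, allowed = 0;
        for (int i = 0; i < trials; i++) {
            if (allowTorrentUpdate(0, rnd))      // simulate release builds
                allowed++;
        }
        System.out.printf("release builds enabled: %.2f%% (expected ~1%%)%n",
                          100.0 * allowed / trials);
        System.out.println("dev build enabled: " + allowTorrentUpdate(7, rnd));
    }
}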