- Switch from ArrayList to ConcurrentHashSet in
        KBucketImpl to reduce the chance of deadlock;
        remove periodic shuffling of the bucket,
        which needs to be addressed elsewhere
zzz
2009-11-27 14:39:53 +00:00
parent 13349777ad
commit ea0747171f
4 changed files with 48 additions and 52 deletions
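For context, here is a minimal sketch, not taken from the I2P sources (the BucketSketch class and its fields are invented for illustration), of what the switch buys. A plain ArrayList shared between threads must be guarded by its monitor for every read, write, and full-loop iteration, and anything invoked inside such a synchronized block runs while the bucket's lock is held. A set built on ConcurrentHashMap, which is roughly what net.i2p.util.ConcurrentHashSet wraps, needs no external lock, so callers can never block on the bucket's monitor or pull it into a lock-ordering cycle.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative only -- BucketSketch is not an I2P class.
class BucketSketch {
    // Old style: every access must hold the list's monitor, including the
    // whole copy loop below; callees inside the block run under that lock.
    private final List<String> entriesOld = new ArrayList<String>();

    Set<String> snapshotOld() {
        Set<String> copy = new HashSet<String>();
        synchronized (entriesOld) {            // lock held across the iteration
            for (String e : entriesOld)
                copy.add(e);
        }
        return copy;
    }

    // New style: a concurrent set (built on ConcurrentHashMap, much as
    // net.i2p.util.ConcurrentHashSet is) needs no external monitor.
    private final Set<String> entriesNew = ConcurrentHashMap.newKeySet();

    Set<String> snapshotNew() {
        // The copy walks a weakly consistent iterator -- no lock, no blocking.
        return new HashSet<String>(entriesNew);
    }
}

As the new comment in KBucketImpl notes below, the per-operation cost may or may not beat a synchronized ArrayList; the main benefit is that nothing can block on, or deadlock through, the bucket anymore.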

KBucket.java

@@ -56,19 +56,19 @@ interface KBucket {
* Retrieve all routing table entries stored in the bucket
* @return set of Hash structures
*/
public Set getEntries();
public Set<Hash> getEntries();
/**
* Retrieve hashes stored in the bucket, excluding the ones specified
* @return set of Hash structures
*/
public Set getEntries(Set toIgnoreHashes);
public Set<Hash> getEntries(Set<Hash> toIgnoreHashes);
public void getEntries(SelectionCollector collector);
/**
* Fill the bucket with entries
* @param entries set of Hash structures
*/
public void setEntries(Set entries);
public void setEntries(Set<Hash> entries);
/**
* Generate a random key that would go inside this bucket

KBucketImpl.java

@@ -9,24 +9,35 @@ package net.i2p.router.networkdb.kademlia;
*/
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.router.RouterContext;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
class KBucketImpl implements KBucket {
private Log _log;
/** set of Hash objects for the peers in the kbucket */
private final List _entries;
/**
* set of Hash objects for the peers in the kbucket
*
* jrandom switched from a HashSet to an ArrayList with this change:
* 2005-08-27 jrandom
* * Minor logging and optimization tweaks in the router and SDK
*
* Now we switch back to a ConcurrentHashSet and remove all the
* synchronization, which may or may not be faster than
* a synchronized ArrayList, with checks for existence before
* adding a Hash. But the other benefit is it removes one
* cause of profileManager/netDb deadlock.
*/
private final Set<Hash> _entries;
/** we center the kbucket set on the given hash, and derive distances from this */
private Hash _local;
/** include if any bits equal or higher to this bit (in big endian order) */
@@ -41,7 +52,7 @@ class KBucketImpl implements KBucket {
public KBucketImpl(I2PAppContext context, Hash local) {
_context = context;
_log = context.logManager().getLog(KBucketImpl.class);
_entries = new ArrayList(0); //all but the last 1 or 2 buckets will be empty
_entries = new ConcurrentHashSet(2); //all but the last 1 or 2 buckets will be empty
_lastShuffle = context.clock().now();
setLocal(local);
}
@@ -53,9 +64,7 @@ class KBucketImpl implements KBucket {
_end = highOrderBitLimit;
}
public int getKeyCount() {
synchronized (_entries) {
return _entries.size();
}
return _entries.size();
}
public Hash getLocal() { return _local; }
@@ -198,46 +207,35 @@ class KBucketImpl implements KBucket {
return true;
}
public Set getEntries() {
Set entries = new HashSet(64);
synchronized (_entries) {
for (int i = 0; i < _entries.size(); i++)
entries.add((Hash)_entries.get(i));
}
public Set<Hash> getEntries() {
Set<Hash> entries = new HashSet(_entries);
return entries;
}
public Set getEntries(Set toIgnoreHashes) {
Set entries = new HashSet(64);
synchronized (_entries) {
for (int i = 0; i < _entries.size(); i++)
entries.add((Hash)_entries.get(i));
entries.removeAll(toIgnoreHashes);
}
public Set<Hash> getEntries(Set toIgnoreHashes) {
Set<Hash> entries = new HashSet(_entries);
entries.removeAll(toIgnoreHashes);
return entries;
}
public void getEntries(SelectionCollector collector) {
synchronized (_entries) {
for (int i = 0; i < _entries.size(); i++)
collector.add((Hash)_entries.get(i));
Set<Hash> entries = new HashSet(_entries);
for (Hash h : entries) {
collector.add(h);
}
}
public void setEntries(Set entries) {
synchronized (_entries) {
_entries.clear();
for (Iterator iter = entries.iterator(); iter.hasNext(); ) {
Hash entry = (Hash)iter.next();
if (!_entries.contains(entry))
_entries.add(entry);
}
}
public void setEntries(Set<Hash> entries) {
_entries.clear();
_entries.addAll(entries);
}
/**
* Todo: shuffling here is a hack and doesn't work since
* we switched back to a HashSet implementation
*/
public int add(Hash peer) {
synchronized (_entries) {
if (!_entries.contains(peer))
_entries.add(peer);
_entries.add(peer);
/**********
// Randomize the bucket every once in a while if we are floodfill, so that
// exploration will return better results. See FloodfillPeerSelector.add(Hash).
if (_lastShuffle + SHUFFLE_DELAY < _context.clock().now() &&
@@ -245,14 +243,12 @@ class KBucketImpl implements KBucket {
Collections.shuffle(_entries, _context.random());
_lastShuffle = _context.clock().now();
}
return _entries.size();
}
***********/
return _entries.size();
}
public boolean remove(Hash peer) {
synchronized (_entries) {
return _entries.remove(peer);
}
return _entries.remove(peer);
}
/**
@@ -332,9 +328,7 @@ class KBucketImpl implements KBucket {
public String toString() {
StringBuilder buf = new StringBuilder(1024);
buf.append("KBucketImpl: ");
synchronized (_entries) {
buf.append(_entries.toString()).append("\n");
}
buf.append(_entries.toString()).append("\n");
buf.append("Low bit: ").append(_begin).append(" high bit: ").append(_end).append('\n');
buf.append("Local key: \n");
if ( (_local != null) && (_local.getData() != null) )

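The comment added above says the change removes one cause of profileManager/netDb deadlock. The following is a generic AB/BA illustration of that kind of cycle with hypothetical lock names; it is not the router's actual call graph, only the shape of the problem that dropping the bucket's monitor avoids.

// Hypothetical sketch of a lock-ordering deadlock; names are invented.
public class DeadlockSketch {
    private static final Object BUCKET_LOCK = new Object();   // stand-in for a synchronized bucket
    private static final Object PROFILE_LOCK = new Object();  // stand-in for the profile manager

    public static void main(String[] args) {
        Thread netDbSide = new Thread(() -> {
            synchronized (BUCKET_LOCK) {        // walk the bucket under its monitor...
                pause();
                synchronized (PROFILE_LOCK) { } // ...then consult peer profiles
            }
        });
        Thread profileSide = new Thread(() -> {
            synchronized (PROFILE_LOCK) {       // reorganize profiles under their lock...
                pause();
                synchronized (BUCKET_LOCK) { }  // ...then look a peer up in the netDb
            }
        });
        netDbSide.start();
        profileSide.start();
        // Each thread takes its first lock, then waits forever for the other's:
        // a classic AB/BA deadlock. A lock-free ConcurrentHashSet removes the
        // BUCKET_LOCK edge from the cycle entirely.
    }

    private static void pause() {
        try { Thread.sleep(100); } catch (InterruptedException ignored) { }
    }
}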
KBucketSet.java

@@ -9,6 +9,7 @@ package net.i2p.router.networkdb.kademlia;
*/
import java.math.BigInteger;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
@@ -88,9 +89,9 @@ class KBucketSet {
return removed;
}
public Set getAll() { return getAll(new HashSet()); }
public Set getAll(Set toIgnore) {
HashSet all = new HashSet(1024);
public Set<Hash> getAll() { return getAll(Collections.EMPTY_SET); };
public Set<Hash> getAll(Set<Hash> toIgnore) {
Set<Hash> all = new HashSet(1024);
for (int i = 0; i < _buckets.length; i++) {
all.addAll(_buckets[i].getEntries(toIgnore));
}

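A side note on the getAll() change above: Collections.EMPTY_SET is the raw-typed constant, so passing it where a Set<Hash> parameter is declared compiles only with an unchecked-conversion warning, while Collections.<Hash>emptySet() returns the same immutable empty singleton with the proper type. Either is safe here because getEntries() copies the bucket into a fresh HashSet before calling removeAll() on the copy. A small stand-alone sketch (class and peer names are illustrative):

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class EmptySetSketch {
    // Mirrors the getAll(Set toIgnore) shape: toIgnore is only read, never mutated.
    static Set<String> getAll(Set<String> toIgnore) {
        Set<String> all = new HashSet<String>();
        Collections.addAll(all, "peerA", "peerB", "peerC");     // stand-in bucket contents
        all.removeAll(toIgnore);
        return all;
    }

    public static void main(String[] args) {
        Set<String> a = getAll(Collections.EMPTY_SET);           // raw constant: unchecked warning
        Set<String> b = getAll(Collections.<String>emptySet());  // typed factory: no warning
        System.out.println(a.size() + " " + b.size());           // prints "3 3"
    }
}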
PeerSelector.java

@@ -121,8 +121,9 @@ public class PeerSelector {
_matches = 0;
}
public void add(Hash entry) {
if (_context.profileOrganizer().isFailing(entry))
return;
// deadlock seen here, and we don't mark profiles failing anymore
//if (_context.profileOrganizer().isFailing(entry))
// return;
if (_toIgnore.contains(entry))
return;
RouterInfo info = _context.netDb().lookupRouterInfoLocally(entry);
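Finally, on the shuffling this commit disables: Collections.shuffle() only accepts a List, so it cannot be applied in place once the bucket's entries live in a Set, which is why the commit message says it needs to be addressed elsewhere. One possible shape for that follow-up, shown purely as a hypothetical sketch and not as the project's plan (the old comment pointed at FloodfillPeerSelector, but this is not its real code):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.Set;

// Hypothetical: randomize at selection time instead of inside the bucket.
public class ShuffleSketch {
    static List<String> randomizedView(Set<String> entries, Random rng) {
        List<String> view = new ArrayList<String>(entries);  // a Set has no order to shuffle
        Collections.shuffle(view, rng);                       // shuffle works on Lists only
        return view;
    }
}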