propagate from branch 'i2p.i2p.zzz.netdbkad' (head bc7310e940f01e68bd6be0ed0681eb624dada332)

to branch 'i2p.i2p' (head 98569f30891693e6888913f50b88d1f37969fe45)
zzz
2013-12-10 02:31:08 +00:00
30 changed files with 256 additions and 1011 deletions

View File

@@ -21,6 +21,12 @@
*/
net.i2p.router.RouterContext ctx = (net.i2p.router.RouterContext) net.i2p.I2PAppContext.getGlobalContext();
/*
* Print out the status for the NetDB
*/
out.print("<h2>Router DHT</h2>");
ctx.netDb().renderStatusHTML(out);
/*
* Print out the status for the UpdateManager
*/

View File

@@ -476,7 +476,7 @@
<group title="BOB Bridge" packages="net.i2p.BOB" />
<group title="BOB Demos" packages="net.i2p.BOB.Demos.echo.echoclient:net.i2p.BOB.Demos.echo.echoserver" />
<group title="Desktopgui Application" packages="net.i2p.desktopgui:net.i2p.desktopgui.*" />
<group title="I2PSnark Application" packages="org.klomp.snark:org.klomp.snark.*:net.i2p.kademlia" />
<group title="I2PSnark Application" packages="org.klomp.snark:org.klomp.snark.*" />
<group title="I2PTunnel Application" packages="net.i2p.i2ptunnel:net.i2p.i2ptunnel.*" />
<group title="Installer Utilities" packages="net.i2p.installer" />
<group title="Jetty Starter and Logging" packages="net.i2p.jetty" />

View File

@@ -17,7 +17,7 @@ import net.i2p.data.SimpleDataStructure;
* a local key, using XOR as the distance metric
*
* Refactored from net.i2p.router.networkdb.kademlia
* @since 0.9.2
* @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public interface KBucket<T extends SimpleDataStructure> {
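For illustration: the type bound means any fixed-length key can now be bucketed, not just 256-bit router hashes. A minimal sketch assuming only the public SimpleDataStructure API (MyKey is hypothetical; i2psnark's 20-byte SHA1 key type plays the same role):

    // Hypothetical SHA1-length key type, usable as KBucketSet<MyKey>
    public class MyKey extends net.i2p.data.SimpleDataStructure {
        public MyKey(byte[] data) { super(data); }
        public int length() { return 20; }  // key length in bytes
    }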

View File

@@ -41,7 +41,7 @@ import net.i2p.util.ConcurrentHashSet;
* removing entries, this KBucket will exceed the max size.
*
* Refactored from net.i2p.router.networkdb.kademlia
* @since 0.9.2
* @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
class KBucketImpl<T extends SimpleDataStructure> implements KBucket<T> {
/**

View File

@@ -35,7 +35,7 @@ import net.i2p.util.Log;
* times 2**(B-1) for Kademlia value B.
*
* Refactored from net.i2p.router.networkdb.kademlia
* @since 0.9.2
* @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public class KBucketSet<T extends SimpleDataStructure> {
private final Log _log;
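Worked numbers for that 2**(B-1) factor (my arithmetic, not from the javadoc): with 256-bit SHA256 keys,

    // range count = key bits * 2^(B-1)
    // B = 1  ->  256 * 2^0 =  256 ranges (classic one-range-per-bit Kademlia)
    // B = 3  ->  256 * 2^2 = 1024 ranges
    // testOrder() in the new KBucketSetTest below checks exactly this:
    // the last range end must equal (bits * (1 << (B-1))) - 1.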

View File

@@ -4,7 +4,7 @@ import net.i2p.data.SimpleDataStructure;
/**
* Called when a kbucket can no longer be split and is too big
* @since 0.9.2
* @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public interface KBucketTrimmer<K extends SimpleDataStructure> {
/**

View File

@@ -5,7 +5,7 @@ import net.i2p.data.SimpleDataStructure;
/**
* Removes a random element, but only if the bucket hasn't changed in 5 minutes.
* @since 0.9.2
* @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public class RandomIfOldTrimmer<T extends SimpleDataStructure> extends RandomTrimmer<T> {

View File

@@ -8,7 +8,7 @@ import net.i2p.data.SimpleDataStructure;
/**
* Removes a random element. Not resistant to flooding.
* @since 0.9.2
* @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public class RandomTrimmer<T extends SimpleDataStructure> implements KBucketTrimmer<T> {
protected final I2PAppContext _ctx;
@@ -26,6 +26,7 @@ public class RandomTrimmer<T extends SimpleDataStructure> implements KBucketTrim
if (sz < _max)
return true;
T toRemove = e.get(_ctx.random().nextInt(sz));
return kbucket.remove(toRemove);
kbucket.remove(toRemove);
return true;
}
}
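The change above means trim() now approves the add even when the randomly picked entry was already gone, instead of letting a lost race reject the new key. For comparison, a sketch of another policy against the same interface (DropFirstTrimmer is hypothetical, not part of this commit):

    // Drop an arbitrary existing entry, then always accept the add
    public class DropFirstTrimmer<T extends SimpleDataStructure> implements KBucketTrimmer<T> {
        public boolean trim(KBucket<T> kbucket, T toAdd) {
            for (T entry : kbucket.getEntries()) {
                kbucket.remove(entry);
                break;
            }
            return true;  // accept toAdd whether or not the removal raced
        }
    }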

View File

@@ -4,7 +4,7 @@ import net.i2p.data.SimpleDataStructure;
/**
* Removes nothing and always rejects the add. Flood resistant.
* @since 0.9.2
* @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public class RejectTrimmer<T extends SimpleDataStructure> implements KBucketTrimmer<T> {
public boolean trim(KBucket<T> kbucket, T toAdd) {

View File

@@ -4,7 +4,7 @@ import net.i2p.data.SimpleDataStructure;
/**
* Visit kbuckets, gathering matches
* @since 0.9.2
* @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
public interface SelectionCollector<T extends SimpleDataStructure> {
public void add(T entry);
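A collector is just a visitor over every stored key. A minimal sketch (Counter is hypothetical; it assumes the visitor-style getAll(SelectionCollector) of the old KBucketSet, deleted further down, carries over to the generic version, as the facade's CountRouters usage suggests):

    public class Counter implements SelectionCollector<Hash> {
        private int count;
        public void add(Hash entry) { count++; }  // called once per stored key
        public int size() { return count; }
    }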

View File

@@ -7,9 +7,9 @@ import net.i2p.data.SimpleDataStructure;
/**
* Help sort Hashes in relation to a base key using the XOR metric
*
* @since 0.9.2
* @since 0.9.2 in i2psnark, moved to core in 0.9.10
*/
class XORComparator<T extends SimpleDataStructure> implements Comparator<T> {
public class XORComparator<T extends SimpleDataStructure> implements Comparator<T> {
private final byte[] _base;
/**

View File

@@ -1,6 +1,6 @@
<html><body><p>
This is a major rewrite of KBucket, KBucketSet, and KBucketImpl from net.i2p.router.networkdb.kademlia.
The classes are now generic to support SHA1, SHA256, or other key lengths.
The long-term goal is to prove out this new implementation in i2psnark,
then move it to core, then convert the network database to use it.
Packaged in i2psnark since 0.9.2, and moved to core in 0.9.10
so the network database can use it.
</p></body></html>
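Concretely, the two construction styles this commit uses (the names ctx/usHash/ourHash are placeholders; per the comment in the new KBucketSetTest below, the four-argument form defaults to a RandomTrimmer):

    // As in KBucketSetTest: default trimmer, K=8, B=1
    KBucketSet<Hash> set = new KBucketSet<Hash>(ctx, usHash, 8, 1);
    // As in KademliaNetworkDatabaseFacade: explicit flood-resistant trimmer
    KBucketSet<Hash> kb = new KBucketSet<Hash>(ctx, ourHash, 16, 3, new RejectTrimmer<Hash>());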

View File

@@ -0,0 +1,161 @@
package net.i2p.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.util.List;
import junit.framework.TestCase;
import net.i2p.I2PAppContext;
import net.i2p.data.Hash;
import net.i2p.util.Log;
/**
* Test KBucketSet.
* Newer tests ported from KBSTest in i2p.zzz.kademlia branch
*
* @author comwiz
* @since 0.9.10 moved from net.i2p.router.networkdb.kademlia
*/
public class KBucketSetTest extends TestCase{
private I2PAppContext context;
private KBucketSet<Hash> set;
private Hash usHash;
private Log log;
private static final int K = 8;
private static final int B = 1;
public void setUp(){
context = I2PAppContext.getGlobalContext();
log = context.logManager().getLog(KBucketSet.class);
byte[] us = new byte[Hash.HASH_LENGTH];
context.random().nextBytes(us);
usHash = new Hash(us);
// We use the default RandomTrimmer so add() will never fail
set = new KBucketSet<Hash>(context, usHash, K, B);
// tests may be run in any order so prime it
addRandom(1000);
}
public void testRandom(){
addRandom(1000);
}
private void addRandom(int count) {
for (int i = 0; i < count; i++) {
byte val[] = new byte[Hash.HASH_LENGTH];
context.random().nextBytes(val);
Hash h = new Hash(val);
// in the highly unlikely chance we randomly generate a hash equal to us
assertTrue(set.add(h) || h.equals(usHash));
}
}
public void testSelf() {
// new implementation will never include myself
assertFalse(set.add(usHash));
}
/** @since 0.9.10 */
public void testConcurrent() {
int count = 2500;
int n = 4;
Thread[] threads = new Thread[n];
for (int i = 0; i < n; i++) {
threads[i] = new RTester(count);
}
for (int i = 0; i < n; i++) {
threads[i].start();
}
for (int i = 0; i < n; i++) {
try {
threads[i].join();
} catch (InterruptedException ie) {}
}
}
/** @since 0.9.10 */
private class RTester extends Thread {
private final int _count;
public RTester(int count) {
_count = count;
}
public void run() {
addRandom(_count);
}
}
/** @since 0.9.10 */
public void testAudit() {
int errors = 0;
for (KBucket<Hash> b : set.getBuckets()) {
for (Hash sds : b.getEntries()) {
int range = set.getRange(sds);
if (range < b.getRangeBegin() || range > b.getRangeEnd()) {
log.error("Hash " + sds + " with range " + range +
" does not belong in " + b);
errors++;
}
}
}
assertTrue(errors == 0);
}
/** @since 0.9.10 */
public void testOrder() {
int bits = Hash.HASH_LENGTH * 8;
int errors = 0;
int lastEnd = -1;
for (KBucket<Hash> b : set.getBuckets()) {
int beg = b.getRangeBegin();
if (beg != lastEnd + 1) {
log.error("Out of order: " + b);
errors++;
}
lastEnd = b.getRangeEnd();
}
if (lastEnd != (bits * (1 << (B-1))) - 1) {
log.error("Out of order: last=" + lastEnd);
errors++;
}
assertTrue(errors == 0);
}
/** @since 0.9.10 */
public void testGenRandom() {
int errors = 0;
for (KBucket b : set.getBuckets()) {
for (int j = 0; j < 4000; j++) {
Hash rand = set.generateRandomKey(b);
int range = set.getRange(rand);
if (range < b.getRangeBegin() || range > b.getRangeEnd()) {
log.error("Generate random key failed range=" + range + " for " + rand + " meant for bucket " + b);
errors++;
}
}
}
assertTrue(errors == 0);
}
/** @since 0.9.10 */
public void testExplore() {
List<Hash> keys = set.getExploreKeys(-1000);
assertTrue(keys.size() > 0);
}
/** @since 0.9.10 */
public void testClosest() {
byte val[] = new byte[Hash.HASH_LENGTH];
for (int i = 0; i < 23; i++) {
context.random().nextBytes(val);
Hash h = new Hash(val);
List<Hash> c = set.getClosest(h, i);
assertTrue(c.size() == i);
}
}
}

View File

@@ -74,7 +74,8 @@ public abstract class NetworkDatabaseFacade implements Service {
public int getKnownLeaseSets() { return 0; }
public boolean isInitialized() { return true; }
public void rescan() {}
/** @deprecated moved to router console */
/** Debug only - all user info moved to NetDbRenderer in router console */
public void renderStatusHTML(Writer out) throws IOException {}
/** public for NetDbRenderer in routerconsole */
public Set<LeaseSet> getLeases() { return Collections.emptySet(); }

View File

@@ -15,6 +15,7 @@ import java.util.Set;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.kademlia.KBucketSet;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
@@ -97,7 +98,7 @@ class ExploreJob extends SearchJob {
available--;
}
KBucketSet ks = _facade.getKBuckets();
KBucketSet<Hash> ks = _facade.getKBuckets();
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(getState().getTarget());
// in a few releases, we can (and should) remove this,
// as routers will honor the above flag, and we want the table to include

View File

@@ -8,10 +8,13 @@ package net.i2p.router.networkdb.kademlia;
*
*/
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import net.i2p.data.Hash;
import net.i2p.kademlia.KBucket;
import net.i2p.kademlia.KBucketSet;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
@@ -28,6 +31,7 @@ class ExploreKeySelectorJob extends JobImpl {
private KademliaNetworkDatabaseFacade _facade;
private final static long RERUN_DELAY_MS = 60*1000;
private final static long OLD_BUCKET_TIME = 15*60*1000;
public ExploreKeySelectorJob(RouterContext context, KademliaNetworkDatabaseFacade facade) {
super(context);
@@ -41,7 +45,7 @@ class ExploreKeySelectorJob extends JobImpl {
requeue(30*RERUN_DELAY_MS);
return;
}
Set<Hash> toExplore = selectKeysToExplore();
Collection<Hash> toExplore = selectKeysToExplore();
_log.info("Filling the explorer pool with: " + toExplore);
if (toExplore != null)
_facade.queueForExploration(toExplore);
@@ -53,32 +57,11 @@ class ExploreKeySelectorJob extends JobImpl {
* for it, with a maximum number of keys limited by the exploration pool size
*
*/
private Set<Hash> selectKeysToExplore() {
private Collection<Hash> selectKeysToExplore() {
Set<Hash> alreadyQueued = _facade.getExploreKeys();
if (alreadyQueued.size() > KBucketSet.NUM_BUCKETS) return null;
Set<Hash> toExplore = new HashSet<Hash>(KBucketSet.NUM_BUCKETS - alreadyQueued.size());
for (int i = 0; i < KBucketSet.NUM_BUCKETS; i++) {
KBucket bucket = _facade.getKBuckets().getBucket(i);
if (bucket.getKeyCount() < KBucketSet.BUCKET_SIZE) {
boolean already = false;
for (Hash key : alreadyQueued) {
if (bucket.shouldContain(key)) {
already = true;
_log.debug("Bucket " + i + " is already queued for exploration \t" + key);
break;
}
}
if (!already) {
// no keys are queued for exploring this still-too-small bucket yet
Hash key = bucket.generateRandomKey();
_log.debug("Bucket " + i + " is NOT queued for exploration, and it only has " + bucket.getKeyCount() + " keys, so explore with \t" + key);
toExplore.add(key);
}
} else {
_log.debug("Bucket " + i + " already has enough keys (" + bucket.getKeyCount() + "), no need to explore further");
}
}
return toExplore;
if (alreadyQueued.size() > KademliaNetworkDatabaseFacade.MAX_EXPLORE_QUEUE)
return null;
return _facade.getKBuckets().getExploreKeys(OLD_BUCKET_TIME);
}
}
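The per-bucket scan has simply moved inside the generic KBucketSet. A rough sketch of what getExploreKeys(delay) is assumed to do (the real code lives in net.i2p.kademlia.KBucketSet; getLastChanged() is my guess at the staleness check, while the other calls appear in the new unit test):

    List<Hash> rv = new ArrayList<Hash>();
    long old = _context.clock().now() - delay;
    for (KBucket<Hash> b : getBuckets()) {
        if (b.getLastChanged() < old)        // bucket idle too long
            rv.add(generateRandomKey(b));    // random key inside its range
    }
    return rv;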

View File

@@ -7,6 +7,7 @@ import java.util.List;
import net.i2p.data.Hash;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.kademlia.KBucketSet;
import net.i2p.router.Job;
import net.i2p.router.MessageSelector;
import net.i2p.router.OutNetMessage;
@@ -69,7 +70,7 @@ class FloodOnlySearchJob extends FloodSearchJob {
//List<Hash> floodfillPeers = _facade.getFloodfillPeers();
// new
List<Hash> floodfillPeers;
KBucketSet ks = _facade.getKBuckets();
KBucketSet<Hash> ks = _facade.getKBuckets();
if (ks != null) {
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(_key);
// Ideally we would add the key to an exclude list, so we don't try to query a ff peer for itself,

View File

@@ -20,6 +20,9 @@ import java.util.TreeSet;
import net.i2p.data.Hash;
import net.i2p.data.RouterAddress;
import net.i2p.data.RouterInfo;
import net.i2p.kademlia.KBucketSet;
import net.i2p.kademlia.SelectionCollector;
import net.i2p.kademlia.XORComparator;
import net.i2p.router.RouterContext;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.router.util.RandomIterator;
@@ -53,7 +56,7 @@ class FloodfillPeerSelector extends PeerSelector {
* @return List of Hash for the peers selected
*/
@Override
List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, true);
}
@@ -68,7 +71,7 @@ class FloodfillPeerSelector extends PeerSelector {
* @return List of Hash for the peers selected
*/
@Override
List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, false);
}
@@ -81,7 +84,7 @@ class FloodfillPeerSelector extends PeerSelector {
* @param peersToIgnore can be null
* @return List of Hash for the peers selected
*/
List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets, boolean preferConnected) {
List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets, boolean preferConnected) {
if (peersToIgnore == null)
peersToIgnore = Collections.singleton(_context.routerHash());
else
@@ -104,7 +107,7 @@ class FloodfillPeerSelector extends PeerSelector {
* List will not include our own hash.
* List is not sorted and not shuffled.
*/
List<Hash> selectFloodfillParticipants(KBucketSet kbuckets) {
List<Hash> selectFloodfillParticipants(KBucketSet<Hash> kbuckets) {
Set<Hash> ignore = Collections.singleton(_context.routerHash());
return selectFloodfillParticipants(ignore, kbuckets);
}
@@ -116,7 +119,7 @@ class FloodfillPeerSelector extends PeerSelector {
* List MAY INCLUDE our own hash.
* List is not sorted and not shuffled.
*/
private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet kbuckets) {
private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
/*****
if (kbuckets == null) return Collections.EMPTY_LIST;
// TODO this is very slow - use profile getPeersByCapability('f') instead
@@ -155,7 +158,7 @@ class FloodfillPeerSelector extends PeerSelector {
* success newer than failure
* Group 3: All others
*/
List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet kbuckets) {
List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet<Hash> kbuckets) {
Set<Hash> ignore = Collections.singleton(_context.routerHash());
return selectFloodfillParticipants(key, maxNumRouters, ignore, kbuckets);
}
@@ -175,7 +178,7 @@ class FloodfillPeerSelector extends PeerSelector {
* @param toIgnore can be null
* @param kbuckets now unused
*/
List<Hash> selectFloodfillParticipants(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) {
List<Hash> selectFloodfillParticipants(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
if (toIgnore == null) {
toIgnore = Collections.singleton(_context.routerHash());
} else if (!toIgnore.contains(_context.routerHash())) {
@@ -193,9 +196,9 @@ class FloodfillPeerSelector extends PeerSelector {
* @param toIgnore can be null
* @param kbuckets now unused
*/
private List<Hash> selectFloodfillParticipantsIncludingUs(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) {
private List<Hash> selectFloodfillParticipantsIncludingUs(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet<Hash> kbuckets) {
List<Hash> ffs = selectFloodfillParticipants(toIgnore, kbuckets);
TreeSet<Hash> sorted = new TreeSet<Hash>(new XORComparator(key));
TreeSet<Hash> sorted = new TreeSet<Hash>(new XORComparator<Hash>(key));
sorted.addAll(ffs);
List<Hash> rv = new ArrayList<Hash>(howMany);
@@ -339,7 +342,7 @@ class FloodfillPeerSelector extends PeerSelector {
return Integer.valueOf(rv);
}
private class FloodfillSelectionCollector implements SelectionCollector {
private class FloodfillSelectionCollector implements SelectionCollector<Hash> {
private final TreeSet<Hash> _sorted;
private final List<Hash> _floodfillMatches;
private final Hash _key;
@@ -354,7 +357,7 @@ class FloodfillPeerSelector extends PeerSelector {
*/
public FloodfillSelectionCollector(Hash key, Set<Hash> toIgnore, int wanted) {
_key = key;
_sorted = new TreeSet<Hash>(new XORComparator(key));
_sorted = new TreeSet<Hash>(new XORComparator<Hash>(key));
_floodfillMatches = new ArrayList<Hash>(8);
_toIgnore = toIgnore;
_wanted = wanted;
@@ -475,7 +478,7 @@
* @return List of Hash for the peers selected, ordered
*/
@Override
List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
if (peersToIgnore != null && peersToIgnore.contains(Hash.FAKE_HASH)) {
// return non-ff

View File

@@ -16,6 +16,8 @@ import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.kademlia.KBucketSet;
import net.i2p.kademlia.XORComparator;
import net.i2p.router.CommSystemFacade;
import net.i2p.router.Job;
import net.i2p.router.MessageSelector;
@@ -93,7 +95,7 @@ class IterativeSearchJob extends FloodSearchJob {
_timeoutMs = Math.min(timeoutMs, MAX_SEARCH_TIME);
_expiration = _timeoutMs + ctx.clock().now();
_rkey = ctx.routingKeyGenerator().getRoutingKey(key);
_toTry = new TreeSet<Hash>(new XORComparator(_rkey));
_toTry = new TreeSet<Hash>(new XORComparator<Hash>(_rkey));
_unheardFrom = new HashSet<Hash>(CONCURRENT_SEARCHES);
_failedPeers = new HashSet<Hash>(TOTAL_SEARCH_LIMIT);
_sentTime = new ConcurrentHashMap<Hash, Long>(TOTAL_SEARCH_LIMIT);
@@ -109,7 +111,7 @@ class IterativeSearchJob extends FloodSearchJob {
}
// pick some floodfill peers and send out the searches
List<Hash> floodfillPeers;
KBucketSet ks = _facade.getKBuckets();
KBucketSet<Hash> ks = _facade.getKBuckets();
if (ks != null) {
// Ideally we would add the key to an exclude list, so we don't try to query a ff peer for itself,
// but we're passing the rkey not the key, so we do it below instead in certain cases.

View File

@@ -1,83 +0,0 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.util.Set;
import net.i2p.data.Hash;
/**
* Group, without inherent ordering, a set of keys a certain distance away from
* a local key, using XOR as the distance metric
*
*/
interface KBucket {
/**
* lowest order high bit for difference keys
*/
public int getRangeBegin();
/**
* highest high bit for the difference keys
*
*/
public int getRangeEnd();
/**
* Set the range low and high bits for difference keys
*/
public void setRange(int lowOrderBitLimit, int highOrderBitLimit);
/**
* Number of keys already contained in this kbucket
*/
public int getKeyCount();
/**
* whether or not the key qualifies as part of this bucket
*
*/
public boolean shouldContain(Hash key);
/**
* Add the peer to the bucket
*
* @return number of keys in the bucket after the addition
*/
public int add(Hash key);
/**
* Remove the key from the bucket
* @return true if the key existed in the bucket before removing it, else false
*/
public boolean remove(Hash key);
/**
* Retrieve all routing table entries stored in the bucket
* @return set of Hash structures
*/
public Set<Hash> getEntries();
/**
* Retrieve hashes stored in the bucket, excluding the ones specified
* @return set of Hash structures
* @deprecated makes a copy, remove toIgnore in KBS instead
*/
public Set<Hash> getEntries(Set<Hash> toIgnoreHashes);
public void getEntries(SelectionCollector collector);
/**
* Fill the bucket with entries
* @param entries set of Hash structures
*/
public void setEntries(Set<Hash> entries);
/**
* Generate a random key that would go inside this bucket
*
*/
public Hash generateRandomKey();
public LocalHash getLocal();
}

View File

@@ -1,474 +0,0 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.math.BigInteger;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
class KBucketImpl implements KBucket {
private Log _log;
/**
* set of Hash objects for the peers in the kbucket
*
* jrandom switched from a HashSet to an ArrayList with this change:
* 2005-08-27 jrandom
* * Minor logging and optimization tweaks in the router and SDK
*
* Now we switch back to a ConcurrentHashSet and remove all the
* synchronization, which may or may not be faster than
* a synchronized ArrayList, with checks for existence before
* adding a Hash. But the other benefit is it removes one
* cause of profileMangager/netDb deadlock.
*/
private final Set<Hash> _entries;
/** we center the kbucket set on the given hash, and derive distances from this */
private LocalHash _local;
/** include if any bits equal or higher to this bit (in big endian order) */
private int _begin;
/** include if no bits higher than this bit (inclusive) are set */
private int _end;
/** when did we last shake things up */
private long _lastShuffle;
private I2PAppContext _context;
public KBucketImpl(I2PAppContext context, LocalHash local) {
_context = context;
_log = context.logManager().getLog(KBucketImpl.class);
_entries = new ConcurrentHashSet<Hash>(2); //all but the last 1 or 2 buckets will be empty
_lastShuffle = context.clock().now();
setLocal(local);
}
/** for testing - use above constructor for production to get common caching */
public KBucketImpl(I2PAppContext context, Hash local) {
this(context, new LocalHash(local));
}
public int getRangeBegin() { return _begin; }
public int getRangeEnd() { return _end; }
public void setRange(int lowOrderBitLimit, int highOrderBitLimit) {
_begin = lowOrderBitLimit;
_end = highOrderBitLimit;
}
public int getKeyCount() {
return _entries.size();
}
public LocalHash getLocal() { return _local; }
private void setLocal(LocalHash local) {
_local = local;
// we want to make sure we've got the cache in place before calling cachedXor
_local.prepareCache();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Local hash reset to " + DataHelper.toHexString(local.getData()));
}
private byte[] distanceFromLocal(Hash key) {
if (key == null)
throw new IllegalArgumentException("Null key for distanceFromLocal?");
return _local.cachedXor(key);
}
public boolean shouldContain(Hash key) {
byte distance[] = distanceFromLocal(key);
// rather than use a BigInteger and compare, we do it manually by
// checking the bits
boolean tooLarge = distanceIsTooLarge(distance);
if (tooLarge) {
if (false && _log.shouldLog(Log.DEBUG))
_log.debug("too large [" + _begin + "-->" + _end + "] "
+ "\nLow: " + BigInteger.ZERO.setBit(_begin).toString(16)
+ "\nCur: " + DataHelper.toHexString(distance)
+ "\nHigh: " + BigInteger.ZERO.setBit(_end).toString(16));
return false;
}
boolean tooSmall = distanceIsTooSmall(distance);
if (tooSmall) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("too small [" + _begin + "-->" + _end + "] distance: " + DataHelper.toHexString(distance));
return false;
}
// this bed is juuuuust right
return true;
/*
// woohah, incredibly excessive object creation! whee!
BigInteger kv = new BigInteger(1, distanceFromLocal(key));
int lowComp = kv.compareTo(_lowerBounds);
int highComp = kv.compareTo(_upperBounds);
//_log.debug("kv.compareTo(low) = " + lowComp + " kv.compareTo(high) " + highComp);
if ( (lowComp >= 0) && (highComp < 0) ) return true;
return false;
*/
}
private final boolean distanceIsTooLarge(byte distance[]) {
int upperLimitBit = Hash.HASH_LENGTH*8 - _end;
// It is too large if there are any bits set before the upperLimitBit
int upperLimitByte = upperLimitBit > 0 ? upperLimitBit / 8 : 0;
if (upperLimitBit <= 0)
return false;
for (int i = 0; i < distance.length; i++) {
if (i < upperLimitByte) {
if (distance[i] != 0x00) {
// outright too large
return true;
}
} else if (i == upperLimitByte) {
if (distance[i] == 0x00) {
// no bits set through the high bit
return false;
} else {
int upperVal = 1 << (upperLimitBit % 8);
if (distance[i] > upperVal) {
// still too large, but close
return true;
} else if (distance[i] == upperVal) {
// ok, it *may* equal the upper limit,
// if the rest of the bytes are 0
for (int j = i+1; j < distance.length; j++) {
if (distance[j] != 0x00) {
// nope
return true;
}
}
// w00t, the rest is made of 0x00 bytes, so it
// exactly matches the upper limit. kooky, very improbable,
// but possible
return false;
}
}
} else if (i > upperLimitByte) {
// no bits set before or at the upper limit, so its
// definitely not too large
return false;
}
}
_log.log(Log.CRIT, "wtf, gravity broke: distance=" + DataHelper.toHexString(distance)
+ ", end=" + _end, new Exception("moo"));
return true;
}
/**
* Is the distance too small?
*
*/
private final boolean distanceIsTooSmall(byte distance[]) {
int beginBit = Hash.HASH_LENGTH*8 - _begin;
// It is too small if there are no bits set before the beginBit
int beginByte = beginBit > 0 ? beginBit / 8 : 0;
if (beginByte >= distance.length) {
if (_begin == 0)
return false;
else
return true;
}
for (int i = 0; i < distance.length; i++) {
if ( (i < beginByte) && (distance[i] != 0x00) ) {
return false;
} else {
if (i != beginByte) {
// zero value and too early... keep going
continue;
} else {
int beginVal = 1 << (_begin % 8);
if (distance[i] >= beginVal) {
return false;
} else {
// no bits set prior to the beginVal
return true;
}
}
}
}
_log.log(Log.CRIT, "wtf, gravity broke! distance=" + DataHelper.toHexString(distance)
+ " begin=" + _begin
+ " beginBit=" + beginBit
+ " beginByte=" + beginByte, new Exception("moo"));
return true;
}
/**
* @return unmodifiable view
*/
public Set<Hash> getEntries() {
return Collections.unmodifiableSet(_entries);
}
/**
* @deprecated makes a copy, remove toIgnore in KBS instead
*/
public Set<Hash> getEntries(Set<Hash> toIgnoreHashes) {
Set<Hash> entries = new HashSet<Hash>(_entries);
entries.removeAll(toIgnoreHashes);
return entries;
}
public void getEntries(SelectionCollector collector) {
for (Hash h : _entries) {
collector.add(h);
}
}
public void setEntries(Set<Hash> entries) {
_entries.clear();
_entries.addAll(entries);
}
/**
* Todo: shuffling here is a hack and doesn't work since
* we switched back to a HashSet implementation
*/
public int add(Hash peer) {
_entries.add(peer);
/**********
// Randomize the bucket every once in a while if we are floodfill, so that
// exploration will return better results. See FloodfillPeerSelector.add(Hash).
if (_lastShuffle + SHUFFLE_DELAY < _context.clock().now() &&
!SearchJob.onlyQueryFloodfillPeers((RouterContext)_context)) {
Collections.shuffle(_entries, _context.random());
_lastShuffle = _context.clock().now();
}
***********/
return _entries.size();
}
public boolean remove(Hash peer) {
return _entries.remove(peer);
}
/**
* Generate a random key to go within this bucket
*
* WARNING - Something is seriously broken here. testRand2() fails right away.
* ExploreKeySelectorJob is now disabled, ExploreJob just searches for a random
* key instead.
*/
public Hash generateRandomKey() {
BigInteger variance = new BigInteger((_end-_begin)-1, _context.random());
variance = variance.setBit(_begin);
//_log.debug("Random variance for " + _size + " bits: " + variance);
byte data[] = variance.toByteArray();
byte hash[] = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
Hash key = new Hash(hash);
data = distanceFromLocal(key);
hash = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
key = new Hash(hash);
return key;
}
public Hash getRangeBeginKey() {
BigInteger lowerBounds = getLowerBounds();
if ( (_local != null) && (_local.getData() != null) ) {
lowerBounds = lowerBounds.xor(new BigInteger(1, _local.getData()));
}
byte data[] = lowerBounds.toByteArray();
byte hash[] = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
Hash key = new Hash(hash);
return key;
}
public Hash getRangeEndKey() {
BigInteger upperBounds = getUpperBounds();
if ( (_local != null) && (_local.getData() != null) ) {
upperBounds = upperBounds.xor(new BigInteger(1, _local.getData()));
}
byte data[] = upperBounds.toByteArray();
byte hash[] = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
Hash key = new Hash(hash);
return key;
}
private BigInteger getUpperBounds() {
return BigInteger.ZERO.setBit(_end);
}
private BigInteger getLowerBounds() {
if (_begin == 0)
return BigInteger.ZERO;
else
return BigInteger.ZERO.setBit(_begin);
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder(1024);
buf.append("KBucketImpl: ");
buf.append(_entries.toString()).append("\n");
buf.append("Low bit: ").append(_begin).append(" high bit: ").append(_end).append('\n');
buf.append("Local key: \n");
if ( (_local != null) && (_local.getData() != null) )
buf.append(toString(_local.getData())).append('\n');
else
buf.append("[undefined]\n");
buf.append("Low and high keys:\n");
buf.append(toString(getRangeBeginKey().getData())).append('\n');
buf.append(toString(getRangeEndKey().getData())).append('\n');
buf.append("Low and high deltas:\n");
buf.append(getLowerBounds().toString(2)).append('\n');
buf.append(getUpperBounds().toString(2)).append('\n');
return buf.toString();
}
/**
* Test harness to make sure its assigning keys to the right buckets
*
* WARNING - Something is seriously broken here. testRand2() fails right away.
*/
public static void main(String args[]) {
testRand2();
testRand();
testLimits();
try { Thread.sleep(10000); } catch (InterruptedException ie) {}
}
private static void testLimits() {
int low = 1;
int high = 3;
Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), Hash.FAKE_HASH);
bucket.setRange(low, high);
Hash lowerBoundKey = bucket.getRangeBeginKey();
Hash upperBoundKey = bucket.getRangeEndKey();
boolean okLow = bucket.shouldContain(lowerBoundKey);
boolean okHigh = bucket.shouldContain(upperBoundKey);
if (okLow && okHigh)
log.debug("Limit test ok");
else
log.error("Limit test failed! ok low? " + okLow + " ok high? " + okHigh);
}
private static void testRand() {
//StringBuilder buf = new StringBuilder(2048);
int low = 1;
int high = 3;
Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
LocalHash local = new LocalHash(Hash.FAKE_HASH);
local.prepareCache();
KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
bucket.setRange(low, high);
//Hash lowerBoundKey = bucket.getRangeBeginKey();
//Hash upperBoundKey = bucket.getRangeEndKey();
for (int i = 0; i < 100000; i++) {
Hash rnd = bucket.generateRandomKey();
//buf.append(toString(rnd.getData())).append('\n');
boolean ok = bucket.shouldContain(rnd);
if (!ok) {
//byte diff[] = bucket.getLocal().cachedXor(rnd);
//BigInteger dv = new BigInteger(1, diff);
//log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData())
// + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2)
// + "\nBucket: \n"+bucket, new Exception("WTF"));
log.error("wtf, bucket doesnt want a key that it generated. i == " + i);
log.error("\nLow: " + DataHelper.toHexString(bucket.getRangeBeginKey().getData())
+ "\nVal: " + DataHelper.toHexString(rnd.getData())
+ "\nHigh:" + DataHelper.toHexString(bucket.getRangeEndKey().getData()));
try { Thread.sleep(1000); } catch (InterruptedException e) {}
System.exit(0);
} else {
//_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
}
//_log.info("Low/High:\n" + toString(lowBounds.toByteArray()) + "\n" + toString(highBounds.toByteArray()));
}
log.info("Passed 100,000 random key generations against the null hash");
}
private static void testRand2() {
Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
int low = 1;
int high = 200;
byte hash[] = new byte[Hash.HASH_LENGTH];
RandomSource.getInstance().nextBytes(hash);
LocalHash local = new LocalHash(hash);
local.prepareCache();
KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
bucket.setRange(low, high);
//Hash lowerBoundKey = bucket.getRangeBeginKey();
//Hash upperBoundKey = bucket.getRangeEndKey();
for (int i = 0; i < 100000; i++) {
Hash rnd = bucket.generateRandomKey();
//buf.append(toString(rnd.getData())).append('\n');
boolean ok = bucket.shouldContain(rnd);
if (!ok) {
//byte diff[] = bucket.getLocal().cachedXor(rnd);
//BigInteger dv = new BigInteger(1, diff);
//log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData())
// + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2)
// + "\nBucket: \n"+bucket, new Exception("WTF"));
log.error("wtf, bucket doesnt want a key that it generated. i == " + i);
log.error("\nLow: " + DataHelper.toHexString(bucket.getRangeBeginKey().getData())
+ "\nVal: " + DataHelper.toHexString(rnd.getData())
+ "\nHigh:" + DataHelper.toHexString(bucket.getRangeEndKey().getData()));
try { Thread.sleep(1000); } catch (InterruptedException e) {}
System.exit(0);
} else {
//_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
}
}
log.info("Passed 100,000 random key generations against a random hash");
}
private final static String toString(byte b[]) {
if (true) return DataHelper.toHexString(b);
StringBuilder buf = new StringBuilder(b.length);
for (int i = 0; i < b.length; i++) {
buf.append(toString(b[i]));
buf.append(" ");
}
return buf.toString();
}
private final static String toString(byte b) {
StringBuilder buf = new StringBuilder(8);
for (int i = 7; i >= 0; i--) {
boolean bb = (0 != (b & (1<<i)));
if (bb)
buf.append("1");
else
buf.append("0");
}
return buf.toString();
}
}
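For reference, the invariant those two bit-scans implement, with numbers matching testLimits() (my summary of the deleted code, not an authoritative spec):

    // shouldContain(key)  iff  2^begin <= xor(local, key) <= 2^end
    // e.g. begin=1, end=3: distances 2..8 are accepted;
    // distance 1 is "too small", distance 9 (binary 1001) is "too large"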

View File

@@ -1,219 +0,0 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.math.BigInteger;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.Log;
/**
* In memory storage of buckets sorted by the XOR metric from the local router's
* identity, with bucket N containing routers BASE^N through BASE^N+1 away, up through
* 2^256 bits away (since we use SHA256).
*
*/
class KBucketSet {
private final Log _log;
private final I2PAppContext _context;
private final LocalHash _us;
private final KBucket _buckets[];
private final AtomicInteger _size = new AtomicInteger();
public final static int BASE = 8; // must go into KEYSIZE_BITS evenly
public final static int KEYSIZE_BITS = Hash.HASH_LENGTH * 8;
public final static int NUM_BUCKETS = KEYSIZE_BITS/BASE;
private final static BigInteger BASE_I = new BigInteger(""+(1<<BASE));
public final static int BUCKET_SIZE = 500; // # values at which we start periodic trimming (500 ~= 250Kb)
public KBucketSet(I2PAppContext context, Hash us) {
_us = new LocalHash(us);
_context = context;
_log = context.logManager().getLog(KBucketSet.class);
_buckets = createBuckets();
context.statManager().createRateStat("netDb.KBSGetAllTime", "Time to add all Hashes to the Collector", "NetworkDatabase", new long[] { 60*60*1000 });
}
/**
* Return true if the peer is new to the bucket it goes in, or false if it was
* already in it
*/
public boolean add(Hash peer) {
int bucket = pickBucket(peer);
if (bucket >= 0) {
int oldSize = _buckets[bucket].getKeyCount();
int numInBucket = _buckets[bucket].add(peer);
if (numInBucket != oldSize)
_size.incrementAndGet();
if (numInBucket > BUCKET_SIZE) {
// perhaps queue up coalesce job? naaahh.. lets let 'er grow for now
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer " + peer + " added to bucket " + bucket);
return oldSize != numInBucket;
} else {
throw new IllegalArgumentException("Unable to pick a bucket. wtf!");
}
}
/**
* Not an exact count (due to concurrency issues) but generally correct
*
*/
public int size() {
return _size.get();
/*
int size = 0;
for (int i = 0; i < _buckets.length; i++)
size += _buckets[i].getKeyCount();
return size;
*/
}
public boolean remove(Hash entry) {
int bucket = pickBucket(entry);
KBucket kbucket = getBucket(bucket);
boolean removed = kbucket.remove(entry);
if (removed)
_size.decrementAndGet();
return removed;
}
/** @since 0.8.8 */
public void clear() {
for (int i = 0; i < _buckets.length; i++) {
_buckets[i].setEntries(Collections.<Hash> emptySet());
}
_size.set(0);
_us.clearXorCache();
}
public Set<Hash> getAll() { return getAll(Collections.<Hash> emptySet()); };
public Set<Hash> getAll(Set<Hash> toIgnore) {
Set<Hash> all = new HashSet<Hash>(1024);
for (int i = 0; i < _buckets.length; i++) {
all.addAll(_buckets[i].getEntries());
}
all.removeAll(toIgnore);
return all;
}
public void getAll(SelectionCollector collector) {
long start = _context.clock().now();
for (int i = 0; i < _buckets.length; i++)
_buckets[i].getEntries(collector);
_context.statManager().addRateData("netDb.KBSGetAllTime", _context.clock().now() - start, 0);
}
public int pickBucket(Hash key) {
for (int i = 0; i < NUM_BUCKETS; i++) {
if (_buckets[i].shouldContain(key))
return i;
}
_log.error("Key does not fit in any bucket?! WTF!\nKey : ["
+ DataHelper.toHexString(key.getData()) + "]"
+ "\nUs : [" + toString(_us.getData()) + "]"
+ "\nDelta: ["
+ DataHelper.toHexString(DataHelper.xor(_us.getData(), key.getData()))
+ "]", new Exception("WTF"));
displayBuckets();
return -1;
}
public KBucket getBucket(int bucket) { return _buckets[bucket]; }
protected KBucket[] createBuckets() {
KBucket[] buckets = new KBucket[NUM_BUCKETS];
for (int i = 0; i < NUM_BUCKETS-1; i++) {
buckets[i] = createBucket(i*BASE, (i+1)*BASE);
}
buckets[NUM_BUCKETS-1] = createBucket(BASE*(NUM_BUCKETS-1), BASE*(NUM_BUCKETS) + 1);
return buckets;
}
protected KBucket createBucket(int start, int end) {
KBucket bucket = new KBucketImpl(_context, _us);
bucket.setRange(start, end);
_log.debug("Creating a bucket from " + start + " to " + (end));
return bucket;
}
public void displayBuckets() {
_log.info(toString());
}
@Override
public String toString() {
BigInteger us = new BigInteger(1, _us.getData());
StringBuilder buf = new StringBuilder(1024);
buf.append("Bucket set rooted on: ").append(us.toString()).append(" (aka ").append(us.toString(2)).append("): \n");
for (int i = 0; i < NUM_BUCKETS; i++) {
buf.append("* Bucket ").append(i).append("/").append(NUM_BUCKETS-1).append(": )\n");
buf.append("Start: ").append("2^").append(_buckets[i].getRangeBegin()).append(")\n");
buf.append("End: ").append("2^").append(_buckets[i].getRangeEnd()).append(")\n");
buf.append("Contents:").append(_buckets[i].toString()).append("\n");
}
return buf.toString();
}
final static String toString(byte b[]) {
byte val[] = new byte[Hash.HASH_LENGTH];
if (b.length < 32)
System.arraycopy(b, 0, val, Hash.HASH_LENGTH-b.length-1, b.length);
else
System.arraycopy(b, Hash.HASH_LENGTH-b.length, val, 0, val.length);
StringBuilder buf = new StringBuilder(KEYSIZE_BITS);
for (int i = 0; i < val.length; i++) {
for (int j = 7; j >= 0; j--) {
boolean bb = (0 != (val[i] & (1<<j)));
if (bb)
buf.append("1");
else
buf.append("0");
}
buf.append(" ");
}
// buf.append(Integer.toBinaryString(val[i]));
return buf.toString();
}
public static void main(String args[]) {
I2PAppContext context = I2PAppContext.getGlobalContext();
Log log = context.logManager().getLog(KBucketSet.class);
KBucketSet set = new KBucketSet(context, Hash.FAKE_HASH);
testSelf(set, log);
testRandom(set, 1000, context, log);
}
private static void testSelf(KBucketSet set, Log log) {
boolean added = set.add(Hash.FAKE_HASH);
if (!added)
log.error("Unable to add self...");
else
log.debug("Added self");
}
private static void testRandom(KBucketSet set, int count, I2PAppContext context, Log log) {
for (int i = 0; i < count; i++) {
byte val[] = new byte[Hash.HASH_LENGTH];
context.random().nextBytes(val);
boolean added = set.add(new Hash(val));
if (!added)
log.error("Unable to add random key [" + DataHelper.toHexString(val) + "]");
else
log.debug("Added random key");
}
}
}
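Putting the deleted constants next to their replacements in the new facade below (the B ~= -8 equivalence is from the comment there; the rest is arithmetic):

    // old: BASE = 8, KEYSIZE_BITS = 256  ->  NUM_BUCKETS = 256/8 = 32 fixed buckets,
    //      trimmed only past BUCKET_SIZE = 500 entries (roughly B ~= -8)
    // new: K = BUCKET_SIZE = 16, B = KAD_B = 3  ->  up to 256 * 2^(3-1) = 1024
    //      ranges of 16 entries each, with buckets split on demand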

View File

@@ -9,6 +9,8 @@ package net.i2p.router.networkdb.kademlia;
*/
import java.io.IOException;
import java.io.Writer;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
@@ -25,6 +27,9 @@ import net.i2p.data.RouterAddress;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.kademlia.KBucketSet;
import net.i2p.kademlia.RejectTrimmer;
import net.i2p.kademlia.SelectionCollector;
import net.i2p.router.Job;
import net.i2p.router.NetworkDatabaseFacade;
import net.i2p.router.Router;
@@ -41,7 +46,7 @@ import net.i2p.util.Log;
*/
public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
protected final Log _log;
private KBucketSet _kb; // peer hashes sorted into kbuckets, but within kbuckets, unsorted
private KBucketSet<Hash> _kb; // peer hashes sorted into kbuckets, but within kbuckets, unsorted
private DataStore _ds; // hash to DataStructure mapping, persisted when necessary
/** where the data store is pushing the data */
private String _dbDir;
@@ -132,7 +137,14 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
*/
protected final static long PUBLISH_JOB_DELAY = 5*60*1000l;
private static final int MAX_EXPLORE_QUEUE = 128;
static final int MAX_EXPLORE_QUEUE = 128;
/**
* kad K
* Was 500 in old implementation but that was with B ~= -8!
*/
private static final int BUCKET_SIZE = 16;
private static final int KAD_B = 3;
public KademliaNetworkDatabaseFacade(RouterContext context) {
_context = context;
@@ -168,7 +180,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return _reseedChecker;
}
KBucketSet getKBuckets() { return _kb; }
KBucketSet<Hash> getKBuckets() { return _kb; }
DataStore getDataStore() { return _ds; }
long getLastExploreNewDate() { return _lastExploreNew; }
@@ -185,13 +197,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return Collections.unmodifiableSet(_exploreKeys);
}
public void removeFromExploreKeys(Set<Hash> toRemove) {
public void removeFromExploreKeys(Collection<Hash> toRemove) {
if (!_initialized) return;
_exploreKeys.removeAll(toRemove);
_context.statManager().addRateData("netDb.exploreKeySet", _exploreKeys.size(), 0);
}
public void queueForExploration(Set<Hash> keys) {
public void queueForExploration(Collection<Hash> keys) {
if (!_initialized) return;
for (Iterator<Hash> iter = keys.iterator(); iter.hasNext() && _exploreKeys.size() < MAX_EXPLORE_QUEUE; ) {
_exploreKeys.add(iter.next());
@@ -240,7 +252,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_log.info("Starting up the kademlia network database");
RouterInfo ri = _context.router().getRouterInfo();
String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR);
_kb = new KBucketSet(_context, ri.getIdentity().getHash());
_kb = new KBucketSet<Hash>(_context, ri.getIdentity().getHash(),
BUCKET_SIZE, KAD_B, new RejectTrimmer<Hash>());
try {
_ds = new PersistentDataStore(_context, dbDir, this);
} catch (IOException ioe) {
@@ -368,7 +381,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return count.size();
}
private class CountRouters implements SelectionCollector {
private class CountRouters implements SelectionCollector<Hash> {
private int _count;
public int size() { return _count; }
public void add(Hash entry) {
@@ -1045,4 +1058,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
_context.jobQueue().addJob(new StoreJob(_context, this, key, ds, onSuccess, onFailure, sendTimeout, toIgnore));
}
/**
* Debug info, HTML formatted
* @since 0.9.10
*/
@Override
public void renderStatusHTML(Writer out) throws IOException {
out.write(_kb.toString().replace("\n", "<br>\n"));
}
}
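This override is what the console fragment at the top of this changeset ends up calling:

    net.i2p.router.RouterContext ctx =
        (net.i2p.router.RouterContext) net.i2p.I2PAppContext.getGlobalContext();
    ctx.netDb().renderStatusHTML(out);  // now dumps the KBucketSet, <br>-separated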

View File

@@ -17,6 +17,8 @@ import java.util.TreeMap;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.kademlia.KBucketSet;
import net.i2p.kademlia.SelectionCollector;
import net.i2p.router.RouterContext;
import net.i2p.router.util.HashDistance;
import net.i2p.util.Log;
@@ -41,7 +43,7 @@ class PeerSelector {
*
* @return ordered list of Hash objects
*/
List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet kbuckets) {
List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet<Hash> kbuckets) {
// get the peers closest to the key
return selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets);
}
@@ -54,7 +56,7 @@ class PeerSelector {
*
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
*/
List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
//if (true)
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets);
@@ -94,7 +96,7 @@ class PeerSelector {
*
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
*/
List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
if (peersToIgnore == null)
peersToIgnore = new HashSet<Hash>(1);
peersToIgnore.add(_context.routerHash());
@@ -109,7 +111,7 @@ class PeerSelector {
}
/** UNUSED */
private class MatchSelectionCollector implements SelectionCollector {
private class MatchSelectionCollector implements SelectionCollector<Hash> {
private TreeMap<BigInteger, Hash> _sorted;
private Hash _key;
private Set<Hash> _toIgnore;
@@ -200,7 +202,7 @@ class PeerSelector {
* @param peersToIgnore can be null
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
*/
List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet<Hash> kbuckets) {
// sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia
// would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm.
// later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance()

View File

@@ -10,6 +10,7 @@ import java.util.Set;
import java.util.TreeSet;
import net.i2p.data.Hash;
import net.i2p.kademlia.XORComparator;
import net.i2p.router.RouterContext;
/**
@@ -61,7 +62,7 @@ class SearchState {
private Set<Hash> locked_getClosest(Set<Hash> peers, int max, Hash target) {
if (_attemptedPeers.size() <= max)
return new HashSet<Hash>(_attemptedPeers);
TreeSet<Hash> closest = new TreeSet<Hash>(new XORComparator(target));
TreeSet<Hash> closest = new TreeSet<Hash>(new XORComparator<Hash>(target));
closest.addAll(_attemptedPeers);
Set<Hash> rv = new HashSet<Hash>(max);
int i = 0;

View File

@@ -1,10 +0,0 @@
package net.i2p.router.networkdb.kademlia;
import net.i2p.data.Hash;
/**
* Visit kbuckets, gathering matches
*/
interface SelectionCollector {
public void add(Hash entry);
}

View File

@@ -18,6 +18,7 @@ import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.kademlia.KBucketSet;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
@@ -231,7 +232,7 @@ class StoreJob extends JobImpl {
private List<Hash> getClosestFloodfillRouters(Hash key, int numClosest, Set<Hash> alreadyChecked) {
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
KBucketSet ks = _facade.getKBuckets();
KBucketSet<Hash> ks = _facade.getKBuckets();
if (ks == null) return new ArrayList<Hash>();
return ((FloodfillPeerSelector)_peerSelector).selectFloodfillParticipants(rkey, numClosest, alreadyChecked, ks);
}

View File

@@ -1,36 +0,0 @@
package net.i2p.router.networkdb.kademlia;
import java.util.Comparator;
import net.i2p.data.Hash;
/**
* Help sort Hashes in relation to a base key using the XOR metric.
*/
class XORComparator implements Comparator<Hash> {
private final byte[] _base;
/**
* @param target key to compare distances with
*/
public XORComparator(Hash target) {
_base = target.getData();
}
/**
* getData() of args must be non-null
*/
public int compare(Hash lhs, Hash rhs) {
byte lhsb[] = lhs.getData();
byte rhsb[] = rhs.getData();
for (int i = 0; i < _base.length; i++) {
int ld = (lhsb[i] ^ _base[i]) & 0xff;
int rd = (rhsb[i] ^ _base[i]) & 0xff;
if (ld < rd)
return -1;
if (ld > rd)
return 1;
}
return 0;
}
}
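A worked example of the byte-wise metric above, using one-byte "hashes" for readability (the generic replacement in net.i2p.kademlia compares the same way):

    // base = 0x05
    // lhs  = 0x04:  0x04 ^ 0x05 = 0x01  (distance 1)
    // rhs  = 0x07:  0x07 ^ 0x05 = 0x02  (distance 2)
    // compare(lhs, rhs) < 0, so 0x04 sorts closer to the base than 0x07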

View File

@@ -1,77 +0,0 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import junit.framework.TestCase;
import net.i2p.I2PAppContext;
import net.i2p.data.Hash;
import net.i2p.util.RandomSource;
/**
* Test KBucketImpl
*
* @author comwiz
*/
public class KBucketImplTest extends TestCase{
private I2PAppContext context;
public void setUp(){
context = I2PAppContext.getGlobalContext();
}
public void testLimits() {
int low = 0;
int high = 4;
KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), Hash.FAKE_HASH);
bucket.setRange(low, high);
Hash lowerBoundKey = bucket.getRangeBeginKey();
Hash upperBoundKey = bucket.getRangeEndKey();
assertTrue(bucket.shouldContain(lowerBoundKey));//
assertTrue(bucket.shouldContain(upperBoundKey));
}
public void testRand() {
int low = 1;
int high = 2000;
LocalHash local = new LocalHash(Hash.FAKE_HASH);
local.prepareCache();
KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
bucket.setRange(low, high);
Hash lowerBoundKey = bucket.getRangeBeginKey();
Hash upperBoundKey = bucket.getRangeEndKey();
for (int i = 0; i < 1000; i++) {
Hash rnd = bucket.generateRandomKey();
assertTrue(bucket.shouldContain(rnd));//
}
}
public void testRand2() {
int low = 1;
int high = 2000;
byte hash[] = new byte[Hash.HASH_LENGTH];
RandomSource.getInstance().nextBytes(hash);
LocalHash local = new LocalHash(hash);
local.prepareCache();
KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), local);
bucket.setRange(low, high);
Hash lowerBoundKey = bucket.getRangeBeginKey();
Hash upperBoundKey = bucket.getRangeEndKey();
for (int i = 0; i < 1000; i++) {
Hash rnd = bucket.generateRandomKey();
assertTrue(bucket.shouldContain(rnd));
}
}
}

View File

@@ -1,41 +0,0 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import junit.framework.TestCase;
import net.i2p.I2PAppContext;
import net.i2p.data.Hash;
/**
* Test KBucketSet
*
* @author comwiz
*/
public class KBucketSetTest extends TestCase{
private I2PAppContext context;
private KBucketSet set;
public void setUp(){
context = I2PAppContext.getGlobalContext();
set = new KBucketSet(context, Hash.FAKE_HASH);
}
public void testRandom(){
for (int i = 0; i < 1000; i++) {
byte val[] = new byte[Hash.HASH_LENGTH];
context.random().nextBytes(val);
assertTrue(set.add(new Hash(val)));
}
}
public void testSelf() {
assertTrue(set.add(Hash.FAKE_HASH));
}
}