diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java b/apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java
new file mode 100644
index 0000000000..075547fbd4
--- /dev/null
+++ b/apps/i2psnark/java/src/net/i2p/kademlia/KBucket.java
@@ -0,0 +1,75 @@
+package net.i2p.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might. Use at your own risk.
+ *
+ */
+
+import java.util.Set;
+
+import net.i2p.data.SimpleDataStructure;
+
+/**
+ * Group, without inherent ordering, a set of keys a certain distance away from
+ * a local key, using XOR as the distance metric
+ *
+ * Refactored from net.i2p.router.networkdb.kademlia
+ */
+public interface KBucket<T extends SimpleDataStructure> {
+
+    /**
+     * Lowest order high bit for difference keys.
+     * The lower-bounds distance of this bucket is 2**begin.
+     * If begin == 0, this is the closest bucket.
+     */
+    public int getRangeBegin();
+
+    /**
+     * Highest high bit for the difference keys.
+     * The upper-bounds distance of this bucket is (2**(end+1)) - 1.
+     * If begin == end, the bucket cannot be split further.
+     * If end == (numbits - 1), this is the furthest bucket.
+     */
+    public int getRangeEnd();
+
+    /**
+     * Number of keys already contained in this kbucket
+     */
+    public int getKeyCount();
+
+    /**
+     * Add the peer to the bucket
+     *
+     * @return true if added
+     */
+    public boolean add(T key);
+
+    /**
+     * Remove the key from the bucket
+     * @return true if the key existed in the bucket before removing it, else false
+     */
+    public boolean remove(T key);
+
+    /**
+     * Update the last-changed timestamp to now.
+     */
+    public void setLastChanged();
+
+    /**
+     * The last-changed timestamp
+     */
+    public long getLastChanged();
+
+    /**
+     * Retrieve all routing table entries stored in the bucket
+     * @return set of Hash structures
+     */
+    public Set<T> getEntries();
+
+    public void getEntries(SelectionCollector<T> collector);
+
+    public void clear();
+}
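Reviewer note (not part of the patch): the begin/end range convention above maps a key's XOR distance onto the index of its highest differing bit. A minimal standalone sketch of that mapping for the unsplit B=1 case, using a hypothetical 4-byte key size:

```java
import java.math.BigInteger;

// Hypothetical demo, not part of the patch: the "range number" bounded by
// getRangeBegin()/getRangeEnd() is the highest set bit of (us XOR key),
// so a bucket [begin, end] holds keys at distance 2**begin .. 2**(end+1) - 1.
public class RangeDemo {
    static int rangeOf(byte[] us, byte[] key) {
        BigInteger xor = new BigInteger(1, us).xor(new BigInteger(1, key));
        return xor.bitLength() - 1; // -1 when key == us
    }

    public static void main(String[] args) {
        byte[] us   = new byte[4];              // all zeros
        byte[] far  = { (byte) 0x80, 0, 0, 0 }; // top bit differs
        byte[] near = { 0, 0, 0, 1 };           // lowest bit differs
        System.out.println(rangeOf(us, far));   // 31: the furthest bucket
        System.out.println(rangeOf(us, near));  // 0: the closest bucket
    }
}
```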
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java b/apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java
new file mode 100644
index 0000000000..f73a4e0712
--- /dev/null
+++ b/apps/i2psnark/java/src/net/i2p/kademlia/KBucketImpl.java
@@ -0,0 +1,149 @@
+package net.i2p.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might. Use at your own risk.
+ *
+ */
+
+import java.util.Collections;
+import java.util.Set;
+
+import net.i2p.I2PAppContext;
+import net.i2p.data.SimpleDataStructure;
+import net.i2p.util.ConcurrentHashSet;
+
+/**
+ * A concurrent implementation using ConcurrentHashSet.
+ * The max size (K) may be temporarily exceeded due to concurrency,
+ * a pending split, or the behavior of the supplied trimmer,
+ * as explained below.
+ * The creator is responsible for splits.
+ *
+ * This class has no knowledge of the DHT base used for XORing,
+ * and thus there are no validity checks in add/remove.
+ *
+ * The begin and end values are immutable.
+ * All entries in this bucket will have at least one bit different
+ * from us in the range [begin, end] inclusive.
+ * Splits must be implemented by creating two new buckets
+ * and discarding this one.
+ *
+ * The keys are kept in a Set and are NOT sorted by last-seen.
+ * Per-key last-seen-time, failures, etc. must be tracked elsewhere.
+ *
+ * If this bucket is full (i.e. begin == end && size == max)
+ * then add() will call KBucketTrimmer.trim() to
+ * (possibly) remove older entries, and to indicate whether
+ * to add the new entry. If the trimmer returns true without
+ * removing entries, this KBucket will exceed the max size.
+ *
+ * Refactored from net.i2p.router.networkdb.kademlia
+ */
+class KBucketImpl<T extends SimpleDataStructure> implements KBucket<T> {
+    /**
+     * set of Hash objects for the peers in the kbucket
+     */
+    private final Set<T> _entries;
+    /** include if any bits equal or higher to this bit (in big endian order) */
+    private final int _begin;
+    /** include if no bits higher than this bit (inclusive) are set */
+    private final int _end;
+    private final int _max;
+    private final KBucketSet.KBucketTrimmer<T> _trimmer;
+    /** when did we last shake things up */
+    private long _lastChanged;
+    private final I2PAppContext _context;
+
+    /**
+     * All entries in this bucket will have at least one bit different
+     * from us in the range [begin, end] inclusive.
+     */
+    public KBucketImpl(I2PAppContext context, int begin, int end, int max, KBucketSet.KBucketTrimmer<T> trimmer) {
+        if (begin > end)
+            throw new IllegalArgumentException(begin + " > " + end);
+        _context = context;
+        _entries = new ConcurrentHashSet<T>(max + 4);
+        _begin = begin;
+        _end = end;
+        _max = max;
+        _trimmer = trimmer;
+    }
+
+    public int getRangeBegin() { return _begin; }
+
+    public int getRangeEnd() { return _end; }
+
+    public int getKeyCount() {
+        return _entries.size();
+    }
+
+    /**
+     * @return an unmodifiable view; not a copy
+     */
+    public Set<T> getEntries() {
+        return Collections.unmodifiableSet(_entries);
+    }
+
+    public void getEntries(SelectionCollector<T> collector) {
+        for (T h : _entries) {
+            collector.add(h);
+        }
+    }
+
+    public void clear() {
+        _entries.clear();
+    }
+
+    /**
+     * Sets last-changed if rv is true OR if the peer is already present.
+     * Calls the trimmer if begin == end and we are full.
+     * If begin != end then add it and caller must do bucket splitting.
+     * @return true if added
+     */
+    public boolean add(T peer) {
+        if (_begin != _end || _entries.size() < _max ||
+            _entries.contains(peer) || _trimmer.trim(this, peer)) {
+            // do this even if already contains, to call setLastChanged()
+            boolean rv = _entries.add(peer);
+            setLastChanged();
+            return rv;
+        }
+        return false;
+    }
+
+    /**
+     * @return if removed. Does NOT set lastChanged.
+     */
+    public boolean remove(T peer) {
+        boolean rv = _entries.remove(peer);
+        //if (rv)
+        //    setLastChanged();
+        return rv;
+    }
+
+    /**
+     * Update the last-changed timestamp to now.
+     */
+    public void setLastChanged() {
+        _lastChanged = _context.clock().now();
+    }
+
+    /**
+     * The last-changed timestamp, which actually indicates last-added or last-seen.
+     */
+    public long getLastChanged() {
+        return _lastChanged;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder buf = new StringBuilder(1024);
+        buf.append(_entries.size());
+        buf.append(" entries in (").append(_begin).append(',').append(_end);
+        buf.append(") : ").append(_entries.toString());
+        return buf.toString();
+    }
+}
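Reviewer note (not part of the patch): the add() path above hands a full, unsplittable bucket to the trimmer, and the class javadoc warns that a trimmer which returns true without removing anything lets the bucket exceed K. A hypothetical trimmer that makes this explicit, assuming the nested KBucketSet.KBucketTrimmer interface introduced in the next file:

```java
package net.i2p.kademlia;

import net.i2p.data.SimpleDataStructure;

// Hypothetical, for illustration only: always accepts the new entry.
// Because nothing is removed, KBucketImpl.add() will grow the bucket past _max.
class AlwaysAcceptTrimmer<T extends SimpleDataStructure>
        implements KBucketSet.KBucketTrimmer<T> {
    public boolean trim(KBucket<T> kbucket, T toAdd) {
        return true; // accept unconditionally; the bucket may exceed K
    }
}
```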
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java b/apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java
new file mode 100644
index 0000000000..e61c4a6fd2
--- /dev/null
+++ b/apps/i2psnark/java/src/net/i2p/kademlia/KBucketSet.java
@@ -0,0 +1,854 @@
+package net.i2p.kademlia;
+/*
+ * free (adj.): unencumbered; not under the control of others
+ * Written by jrandom in 2003 and released into the public domain
+ * with no warranty of any kind, either expressed or implied.
+ * It probably won't make your computer catch on fire, or eat
+ * your children, but it might. Use at your own risk.
+ *
+ */
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import net.i2p.I2PAppContext;
+import net.i2p.data.DataHelper;
+import net.i2p.data.SimpleDataStructure;
+import net.i2p.util.Log;
+
+/**
+ * In-memory storage of buckets sorted by the XOR metric from the base (us)
+ * passed in via the constructor.
+ * This starts with one bucket covering the whole key space, and
+ * may eventually be split to a max of the number of bits in the data type
+ * (160 for SHA1Hash or 256 for Hash),
+ * times 2**(B-1) for Kademlia value B.
+ *
+ * Refactored from net.i2p.router.networkdb.kademlia
+ */
+public class KBucketSet<T extends SimpleDataStructure> {
+    private final Log _log;
+    private final I2PAppContext _context;
+    private final T _us;
+
+    /**
+     * The bucket list is locked by _bucketsLock, however the individual
+     * buckets are not locked. Users may see buckets that have more than
+     * the maximum k entries, or may have adds and removes silently fail
+     * when they appear to succeed.
+     *
+     * Closest values are in bucket 0, furthest are in the last bucket.
+     */
+    private final List<KBucket<T>> _buckets;
+    private final Range<T> _rangeCalc;
+    private final KBucketTrimmer<T> _trimmer;
+
+    /**
+     * Locked for reading only when traversing all the buckets.
+     * Locked for writing only when splitting a bucket.
+     * Adds/removes/gets from individual buckets are not locked.
+     */
+    private final ReentrantReadWriteLock _bucketsLock = new ReentrantReadWriteLock(false);
+
+    private final int KEYSIZE_BITS;
+    private final int NUM_BUCKETS;
+    private final int BUCKET_SIZE;
+    private final int B_VALUE;
+    private final int B_FACTOR;
+
+    /**
+     * Use the default trim strategy, which removes a random entry.
+     * @param us the local identity (typically a SHA1Hash or Hash)
+     *           The class must have a zero-argument constructor.
+     * @param max the Kademlia value "k", the max per bucket, k > 4
+     * @param b the Kademlia value "b", split buckets an extra 2**(b-1) times,
+     *          b > 0, use 1 for bittorrent, Kademlia paper recommends 5
+     */
+    public KBucketSet(I2PAppContext context, T us, int max, int b) {
+        this(context, us, max, b, new RandomTrimmer<T>(context, max));
+    }
+
+    /**
+     * Use the supplied trim strategy.
+     */
+    public KBucketSet(I2PAppContext context, T us, int max, int b, KBucketTrimmer<T> trimmer) {
+        _us = us;
+        _context = context;
+        _log = context.logManager().getLog(KBucketSet.class);
+        _trimmer = trimmer;
+        if (max <= 4 || b <= 0 || b > 8)
+            throw new IllegalArgumentException();
+        KEYSIZE_BITS = us.length() * 8;
+        B_VALUE = b;
+        B_FACTOR = 1 << (b - 1);
+        NUM_BUCKETS = KEYSIZE_BITS * B_FACTOR;
+        BUCKET_SIZE = max;
+        _buckets = createBuckets();
+        _rangeCalc = new Range<T>(us, B_VALUE);
+        // this verifies the zero-argument constructor
+        makeKey(new byte[us.length()]);
+    }
+
+    private void getReadLock() {
+        _bucketsLock.readLock().lock();
+    }
+
+    /**
+     * Get the lock if we can. Non-blocking.
+     * @return true if the lock was acquired
+     */
+    private boolean tryReadLock() {
+        return _bucketsLock.readLock().tryLock();
+    }
+
+    private void releaseReadLock() {
+        _bucketsLock.readLock().unlock();
+    }
+
+    /** @return true if the lock was acquired */
+    private boolean getWriteLock() {
+        try {
+            boolean rv = _bucketsLock.writeLock().tryLock(3000, TimeUnit.MILLISECONDS);
+            if ((!rv) && _log.shouldLog(Log.WARN))
+                _log.warn("no lock, size is: " + _bucketsLock.getQueueLength(), new Exception("rats"));
+            return rv;
+        } catch (InterruptedException ie) {}
+        return false;
+    }
+
+    private void releaseWriteLock() {
+        _bucketsLock.writeLock().unlock();
+    }
+
+    /**
+     * @return true if the peer is new to the bucket it goes in, or false if it was
+     *         already in it. Always returns false on an attempt to add ourselves.
+     */
+    public boolean add(T peer) {
+        KBucket<T> bucket;
+        getReadLock();
+        try {
+            bucket = getBucket(peer);
+        } finally { releaseReadLock(); }
+        if (bucket != null) {
+            if (bucket.add(peer)) {
+                if (_log.shouldLog(Log.DEBUG))
+                    _log.debug("Peer " + peer + " added to bucket " + bucket);
+                if (shouldSplit(bucket)) {
+                    if (_log.shouldLog(Log.DEBUG))
+                        _log.debug("Splitting bucket " + bucket);
+                    split(bucket.getRangeBegin());
+                    //testAudit(this, _log);
+                }
+                return true;
+            } else {
+                if (_log.shouldLog(Log.DEBUG))
+                    _log.debug("Peer " + peer + " NOT added to bucket " + bucket);
+                return false;
+            }
+        } else {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Failed to add, probably us: " + peer);
+            return false;
+        }
+    }
+
+    /**
+     * No lock required.
+     * FIXME will split the closest buckets too far if B > 1 and K < 2**B
+     * Won't ever really happen and if it does it still works.
+     */
+    private boolean shouldSplit(KBucket<T> b) {
+        return
+            b.getRangeBegin() != b.getRangeEnd() &&
+            b.getKeyCount() > BUCKET_SIZE;
+    }
+
+    /**
+     * Grabs the write lock.
+     * Caller must NOT have the read lock.
+     * The bucket should be splittable (range start != range end).
+     * @param r the range start of the bucket to be split
+     */
+    private void split(int r) {
+        if (!getWriteLock())
+            return;
+        try {
+            locked_split(r);
+        } finally { releaseWriteLock(); }
+    }
+
+    /**
+     * Creates two or more new buckets. The old bucket is replaced and discarded.
+     *
+     * Caller must hold write lock
+     * The bucket should be splittable (range start != range end).
+     * @param r the range start of the bucket to be split
+     */
+    private void locked_split(int r) {
+        int b = pickBucket(r);
+        while (shouldSplit(_buckets.get(b))) {
+            KBucket<T> b0 = _buckets.get(b);
+            // Each bucket gets half the keyspace.
+            // When B_VALUE = 1, or the bucket is larger than B_FACTOR, then
+            // e.g. 0-159 => 0-158, 159-159
+            // When B_VALUE > 1, and the bucket is smaller than B_FACTOR, then
+            // e.g. 1020-1023 => 1020-1021, 1022-1023
+            int s1, e1, s2, e2;
+            s1 = b0.getRangeBegin();
+            e2 = b0.getRangeEnd();
+            if (B_FACTOR > 1 &&
+                (s1 & (B_FACTOR - 1)) == 0 &&
+                ((e2 + 1) & (B_FACTOR - 1)) == 0 &&
+                e2 > s1 + B_FACTOR) {
+                // The bucket is a "whole" kbucket with a range > B_FACTOR,
+                // so it should be split into two "whole" kbuckets each with
+                // a range >= B_FACTOR.
+                // Log split
+                s2 = e2 + 1 - B_FACTOR;
+            } else {
+                // The bucket is the smallest "whole" kbucket with a range == B_FACTOR,
+                // or B_VALUE > 1 and the bucket has already been split.
+                // Start or continue splitting down to a depth B_VALUE.
+                // Linear split
+                s2 = s1 + ((1 + e2 - s1) / 2);
+            }
+            e1 = s2 - 1;
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Splitting (" + s1 + ',' + e2 + ") -> (" + s1 + ',' + e1 + ") (" + s2 + ',' + e2 + ')');
+            KBucket<T> b1 = createBucket(s1, e1);
+            KBucket<T> b2 = createBucket(s2, e2);
+            for (T key : b0.getEntries()) {
+                if (getRange(key) < s2)
+                    b1.add(key);
+                else
+                    b2.add(key);
+            }
+            _buckets.set(b, b1);
+            _buckets.add(b + 1, b2);
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("Split bucket at idx " + b +
+                           ":\n" + b0 +
+                           "\ninto: " + b1 +
+                           "\nand: " + b2);
+            //if (_log.shouldLog(Log.DEBUG))
+            //    _log.debug("State is now: " + toString());
+
+            if (b2.getKeyCount() > BUCKET_SIZE) {
+                // should be rare... too hard to call _trimmer from here
+                // (and definitely not from inside the write lock)
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("All went into 2nd bucket after split");
+            }
+            // loop if all the entries went in the first bucket
+        }
+    }
+
+    /**
+     * The current number of entries.
+     */
+    public int size() {
+        int rv = 0;
+        getReadLock();
+        try {
+            for (KBucket<T> b : _buckets) {
+                rv += b.getKeyCount();
+            }
+        } finally { releaseReadLock(); }
+        return rv;
+    }
+
+    public boolean remove(T entry) {
+        KBucket<T> kbucket;
+        getReadLock();
+        try {
+            kbucket = getBucket(entry);
+        } finally { releaseReadLock(); }
+        if (kbucket == null)
+            // getBucket() returns null only for ourselves
+            return false;
+        boolean removed = kbucket.remove(entry);
+        return removed;
+    }
+
+    /** @since 0.8.8 */
+    public void clear() {
+        getReadLock();
+        try {
+            for (KBucket<T> b : _buckets) {
+                b.clear();
+            }
+        } finally { releaseReadLock(); }
+        _rangeCalc.clear();
+    }
+
+    /**
+     * @return a copy in a new set
+     */
+    public Set<T> getAll() {
+        Set<T> all = new HashSet<T>(256);
+        getReadLock();
+        try {
+            for (KBucket<T> b : _buckets) {
+                all.addAll(b.getEntries());
+            }
+        } finally { releaseReadLock(); }
+        return all;
+    }
+
+    /**
+     * @return a copy in a new set
+     */
+    public Set<T> getAll(Set<T> toIgnore) {
+        Set<T> all = getAll();
+        all.removeAll(toIgnore);
+        return all;
+    }
+
+    public void getAll(SelectionCollector<T> collector) {
+        getReadLock();
+        try {
+            for (KBucket<T> b : _buckets) {
+                b.getEntries(collector);
+            }
+        } finally { releaseReadLock(); }
+    }
+
+    /**
+     * The keys closest to us.
+     * Returned list will never contain us.
+     * @return non-null, closest first
+     */
+    public List<T> getClosest(int max) {
+        return getClosest(max, Collections.EMPTY_SET);
+    }
+
+    /**
+     * The keys closest to us.
+     * Returned list will never contain us.
+     * @return non-null, closest first
+     */
+    public List<T> getClosest(int max, Collection<T> toIgnore) {
+        List<T> rv = new ArrayList<T>(max);
+        int count = 0;
+        getReadLock();
+        try {
+            // start at first (closest) bucket
+            for (int i = 0; i < _buckets.size() && count < max; i++) {
+                Set<T> entries = _buckets.get(i).getEntries();
+                // add the whole bucket except for ignores,
+                // extras will be trimmed after sorting
+                for (T e : entries) {
+                    if (!toIgnore.contains(e)) {
+                        rv.add(e);
+                        count++;
+                    }
+                }
+            }
+        } finally { releaseReadLock(); }
+        Comparator<T> comp = new XORComparator<T>(_us);
+        Collections.sort(rv, comp);
+        int sz = rv.size();
+        for (int i = sz - 1; i >= max; i--) {
+            rv.remove(i);
+        }
+        return rv;
+    }
+
+    /**
+     * The keys closest to the key.
+     * Returned list will never contain us.
+     * @return non-null, closest first
+     */
+    public List<T> getClosest(T key, int max) {
+        return getClosest(key, max, Collections.EMPTY_SET);
+    }
+
+    /**
+     * The keys closest to the key.
+     * Returned list will never contain us.
+     * @return non-null, closest first
+     */
+    public List<T> getClosest(T key, int max, Collection<T> toIgnore) {
+        if (key.equals(_us))
+            return getClosest(max, toIgnore);
+        List<T> rv = new ArrayList<T>(max);
+        int count = 0;
+        getReadLock();
+        try {
+            int start = pickBucket(key);
+            // start at closest bucket, then to the smaller (closer to us) buckets
+            for (int i = start; i >= 0 && count < max; i--) {
+                Set<T> entries = _buckets.get(i).getEntries();
+                for (T e : entries) {
+                    if (!toIgnore.contains(e)) {
+                        rv.add(e);
+                        count++;
+                    }
+                }
+            }
+            // then the farther from us buckets if necessary
+            for (int i = start + 1; i < _buckets.size() && count < max; i++) {
+                Set<T> entries = _buckets.get(i).getEntries();
+                for (T e : entries) {
+                    if (!toIgnore.contains(e)) {
+                        rv.add(e);
+                        count++;
+                    }
+                }
+            }
+        } finally { releaseReadLock(); }
+        Comparator<T> comp = new XORComparator<T>(key);
+        Collections.sort(rv, comp);
+        int sz = rv.size();
+        for (int i = sz - 1; i >= max; i--) {
+            rv.remove(i);
+        }
+        return rv;
+    }
+
+    /**
+     * The bucket number (NOT the range number) that the xor of the key goes in
+     * Caller must hold read lock
+     * @return 0 to max-1 or -1 for us
+     */
+    private int pickBucket(T key) {
+        int range = getRange(key);
+        if (range < 0)
+            return -1;
+        int rv = pickBucket(range);
+        if (rv >= 0) {
+            return rv;
+        }
+        _log.error("Key does not fit in any bucket?! WTF!\nKey  : [" +
+                   DataHelper.toHexString(key.getData()) + "]" +
+                   "\nUs   : " + _us +
+                   "\nDelta: [" +
+                   DataHelper.toHexString(DataHelper.xor(_us.getData(), key.getData())) +
+                   "]", new Exception("WTF"));
+        _log.error(toString());
+        throw new IllegalStateException("pickBucket returned " + rv);
+        //return -1;
+    }
+
+    /**
+     * Returned list is a copy of the bucket list, closest first,
+     * with the actual buckets (not a copy).
+     *
+     * Primarily for testing. You shouldn't ever need to get all the buckets.
+     * Use getClosest() or getAll() instead to get the keys.
+     *
+     * @return non-null
+     */
+    List<KBucket<T>> getBuckets() {
+        getReadLock();
+        try {
+            return new ArrayList<KBucket<T>>(_buckets);
+        } finally { releaseReadLock(); }
+    }
+
+    /**
+     * The bucket that the xor of the key goes in
+     * Caller must hold read lock
+     * @return null if key is us
+     */
+    private KBucket<T> getBucket(T key) {
+        int bucket = pickBucket(key);
+        if (bucket < 0)
+            return null;
+        return _buckets.get(bucket);
+    }
+
+    /**
+     * The bucket number that contains this range number
+     * Caller must hold read lock or write lock
+     * @return 0 to max-1 or -1 for us
+     */
+    private int pickBucket(int range) {
+        // If B is small, a linear search from back to front
+        // is most efficient since most of the keys are at the end...
+        // If B is larger, there's a lot of sub-buckets
+        // of equal size to be checked so a binary search is better
+        if (B_VALUE <= 3) {
+            for (int i = _buckets.size() - 1; i >= 0; i--) {
+                KBucket<T> b = _buckets.get(i);
+                if (range >= b.getRangeBegin() && range <= b.getRangeEnd())
+                    return i;
+            }
+            return -1;
+        } else {
+            KBucket<T> dummy = new DummyBucket<T>(range);
+            return Collections.binarySearch(_buckets, dummy, new BucketComparator<T>());
+        }
+    }
+
+    private List<KBucket<T>> createBuckets() {
+        // just an initial size
+        List<KBucket<T>> buckets = new ArrayList<KBucket<T>>(4 * B_FACTOR);
+        buckets.add(createBucket(0, NUM_BUCKETS - 1));
+        return buckets;
+    }
+
+    private KBucket<T> createBucket(int start, int end) {
+        if (end - start >= B_FACTOR &&
+            (((end + 1) & (B_FACTOR - 1)) != 0 ||
+             (start & (B_FACTOR - 1)) != 0))
+            throw new IllegalArgumentException("Sub-bkt crosses K-bkt boundary: " + start + '-' + end);
+        KBucket<T> bucket = new KBucketImpl<T>(_context, start, end, BUCKET_SIZE, _trimmer);
+        return bucket;
+    }
+
+    /**
+     * The number of bits minus 1 (range number) for the xor of the key.
+     * Package private for testing only. Others shouldn't need this.
+     * @return 0 to max-1 or -1 for us
+     */
+    int getRange(T key) {
+        return _rangeCalc.getRange(key);
+    }
+
+    /**
+     * For every bucket that hasn't been updated in this long,
+     * generate a random key that would be a member of that bucket.
+     * The returned keys may be searched for to "refresh" the buckets.
+     * @return non-null, closest first
+     */
+    public List<T> getExploreKeys(long age) {
+        List<T> rv = new ArrayList<T>(_buckets.size());
+        long old = _context.clock().now() - age;
+        getReadLock();
+        try {
+            for (KBucket<T> b : _buckets) {
+                if (b.getLastChanged() < old)
+                    rv.add(generateRandomKey(b));
+            }
+        } finally { releaseReadLock(); }
+        return rv;
+    }
+
+    /**
+     * Generate a random key to go within this bucket
+     * Package private for testing only. Others shouldn't need this.
+     */
+    T generateRandomKey(KBucket<T> bucket) {
+        int begin = bucket.getRangeBegin();
+        int end = bucket.getRangeEnd();
+        // number of fixed bits, out of B_VALUE - 1 bits
+        int fixed = 0;
+        int bsz = 1 + end - begin;
+        // compute fixed = B_VALUE - log2(bsz)
+        // e.g. for B=4, B_FACTOR=8: sz 4 -> fixed 1, sz 2 -> fixed 2, sz 1 -> fixed 3
+        while (bsz < B_FACTOR) {
+            fixed++;
+            bsz <<= 1;
+        }
+        int fixedBits = 0;
+        if (fixed > 0) {
+            // 0x01, 03, 07, 0f, ...
+            int mask = (1 << fixed) - 1;
+            // fixed bits masked from begin
+            fixedBits = (begin >> (B_VALUE - (fixed + 1))) & mask;
+        }
+        int obegin = begin;
+        int oend = end;
+        begin >>= (B_VALUE - 1);
+        end >>= (B_VALUE - 1);
+        // we need randomness for [0, begin) bits
+        BigInteger variance;
+        // 00000000rrrr
+        if (begin > 0)
+            variance = new BigInteger(begin - fixed, _context.random());
+        else
+            variance = BigInteger.ZERO;
+        // we need nonzero randomness for [begin, end] bits
+        int numNonZero = 1 + end - begin;
+        if (numNonZero == 1) {
+            // 00001000rrrr
+            variance = variance.setBit(begin);
+            // fixed bits as the 'main' bucket is split
+            // 00001fffrrrr
+            if (fixed > 0)
+                variance = variance.or(BigInteger.valueOf(fixedBits).shiftLeft(begin - fixed));
+        } else {
+            // don't span main bucket boundaries with depth > 1
+            if (fixed > 0)
+                throw new IllegalStateException("WTF " + bucket);
+            BigInteger nonz;
+            if (numNonZero <= 62) {
+                // add one to ensure nonzero
+                long nz = 1 + _context.random().nextLong((1L << numNonZero) - 1);
+                nonz = BigInteger.valueOf(nz);
+            } else {
+                // loop to ensure nonzero
+                do {
+                    nonz = new BigInteger(numNonZero, _context.random());
+                } while (nonz.equals(BigInteger.ZERO));
+            }
+            // shift left and or-in the nonzero randomness
+            if (begin > 0)
+                nonz = nonz.shiftLeft(begin);
+            // 0000nnnnrrrr
+            variance = variance.or(nonz);
+        }
+
+        if (_log.shouldLog(Log.DEBUG))
+            _log.debug("SB(" + obegin + ',' + oend + ") KB(" + begin + ',' + end + ") fixed=" + fixed + " fixedBits=" + fixedBits + " numNonZ=" + numNonZero);
+        byte data[] = variance.toByteArray();
+        T key = makeKey(data);
+        byte[] hash = DataHelper.xor(key.getData(), _us.getData());
+        T rv = makeKey(hash);
+
+        // DEBUG
+        //int range = getRange(rv);
+        //if (range < obegin || range > oend) {
+        //    throw new IllegalStateException("Generate random key failed range=" + range + " for " + rv + " meant for bucket " + bucket);
+        //}
+
+        return rv;
+    }
+
+    /**
+     * Make a new SimpleDataStructure from the data
+     * @param data size <= SDS length, else throws IAE
+     *             Can be 1 bigger if top byte is zero
+     */
+    private T makeKey(byte[] data) {
+        int len = _us.length();
+        int dlen = data.length;
+        if (dlen > len + 1 ||
+            (dlen == len + 1 && data[0] != 0))
+            throw new IllegalArgumentException("bad length " + dlen + " > " + len);
+        T rv;
+        try {
+            rv = (T) _us.getClass().newInstance();
+        } catch (Exception e) {
+            _log.error("fail", e);
+            throw new RuntimeException(e);
+        }
+        if (dlen == len) {
+            rv.setData(data);
+        } else {
+            byte[] ndata = new byte[len];
+            if (dlen == len + 1) {
+                // one bigger
+                System.arraycopy(data, 1, ndata, 0, len);
+            } else {
+                // smaller
+                System.arraycopy(data, 0, ndata, len - dlen, dlen);
+            }
+            rv.setData(ndata);
+        }
+        return rv;
+    }
+
+    private static class Range<T extends SimpleDataStructure> {
+        private final int _bValue;
+        private final BigInteger _bigUs;
+        private final Map<T, Integer> _distanceCache;
+
+        public Range(T us, int bValue) {
+            _bValue = bValue;
+            _bigUs = new BigInteger(1, us.getData());
+            _distanceCache = new LHM<T, Integer>(256);
+        }
+
+        /** @return 0 to max-1 or -1 for us */
+        public int getRange(T key) {
+            Integer rv;
+            synchronized (_distanceCache) {
+                rv = _distanceCache.get(key);
+                if (rv == null) {
+                    // easy way when _bValue == 1
+                    //rv = Integer.valueOf(_bigUs.xor(new BigInteger(1, key.getData())).bitLength() - 1);
+                    BigInteger xor = _bigUs.xor(new BigInteger(1, key.getData()));
+                    int range = xor.bitLength() - 1;
+                    if (_bValue > 1) {
+                        int toShift = range + 1 - _bValue;
+                        int highbit = range;
+                        range <<= _bValue - 1;
+                        if (toShift >= 0) {
+                            int extra = xor.clearBit(highbit).shiftRight(toShift).intValue();
+                            range += extra;
+                            //Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketSet.class);
+                            //if (log.shouldLog(Log.DEBUG))
+                            //    log.debug("highbit " + highbit + " toshift " + toShift + " extra " + extra + " new " + range);
+                        }
+                    }
+                    rv = Integer.valueOf(range);
+                    _distanceCache.put(key, rv);
+                }
+            }
+            return rv.intValue();
+        }
+
+        public void clear() {
+            synchronized (_distanceCache) {
+                _distanceCache.clear();
+            }
+        }
+    }
+
+    private static class LHM<K, V> extends LinkedHashMap<K, V> {
+        private final int _max;
+
+        public LHM(int max) {
+            super(max, 0.75f, true);
+            _max = max;
+        }
+
+        @Override
+        protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
+            return size() > _max;
+        }
+    }
+
+    /**
+     * For Collections.binarySearch.
+     * getRangeBegin == getRangeEnd.
+     */
+    private static class DummyBucket<T extends SimpleDataStructure> implements KBucket<T> {
+        private final int r;
+
+        public DummyBucket(int range) {
+            r = range;
+        }
+
+        public int getRangeBegin() { return r; }
+        public int getRangeEnd() { return r; }
+
+        public int getKeyCount() {
+            return 0;
+        }
+
+        public Set<T> getEntries() {
+            throw new UnsupportedOperationException();
+        }
+
+        public void getEntries(SelectionCollector<T> collector) {
+            throw new UnsupportedOperationException();
+        }
+
+        public void clear() {}
+
+        public boolean add(T peer) {
+            throw new UnsupportedOperationException();
+        }
+
+        public boolean remove(T peer) {
+            return false;
+        }
+
+        public void setLastChanged() {}
+
+        public long getLastChanged() {
+            return 0;
+        }
+    }
+
+    /**
+     * For Collections.binarySearch.
+     * Returns equal for any overlap.
+     */
+    private static class BucketComparator<T extends SimpleDataStructure> implements Comparator<KBucket<T>> {
+        public int compare(KBucket<T> l, KBucket<T> r) {
+            if (l.getRangeEnd() < r.getRangeBegin())
+                return -1;
+            if (l.getRangeBegin() > r.getRangeEnd())
+                return 1;
+            return 0;
+        }
+    }
+
+    /**
+     * Called when a kbucket can no longer be split and is too big
+     */
+    public interface KBucketTrimmer<K extends SimpleDataStructure> {
+        /**
+         * Called from add() just before adding the entry.
+         * You may call getEntries() and/or remove() from here.
+         * Do NOT call add().
+         * To always discard a newer entry, always return false.
+         *
+         * @param kbucket the kbucket that is now too big
+         * @param toAdd the entry being added, causing the bucket to be too big
+         * @return true to actually add the entry.
+         */
+        public boolean trim(KBucket<K> kbucket, K toAdd);
+    }
+
+    /**
+     * Removes a random element. Not resistant to flooding.
+     */
+    public static class RandomTrimmer<T extends SimpleDataStructure> implements KBucketTrimmer<T> {
+        protected final I2PAppContext _ctx;
+        private final int _max;
+
+        public RandomTrimmer(I2PAppContext ctx, int max) {
+            _ctx = ctx;
+            _max = max;
+        }
+
+        public boolean trim(KBucket<T> kbucket, T toAdd) {
+            List<T> e = new ArrayList<T>(kbucket.getEntries());
+            int sz = e.size();
+            // concurrency
+            if (sz < _max)
+                return true;
+            T toRemove = e.get(_ctx.random().nextInt(sz));
+            return kbucket.remove(toRemove);
+        }
+    }
+
+    /**
+     * Removes a random element, but only if the bucket hasn't changed in 5 minutes.
+     */
+    public static class RandomIfOldTrimmer<T extends SimpleDataStructure> extends RandomTrimmer<T> {
+
+        public RandomIfOldTrimmer(I2PAppContext ctx, int max) {
+            super(ctx, max);
+        }
+
+        public boolean trim(KBucket<T> kbucket, T toAdd) {
+            if (kbucket.getLastChanged() > _ctx.clock().now() - 5*60*1000)
+                return false;
+            return super.trim(kbucket, toAdd);
+        }
+    }
+
+    /**
+     * Removes nothing and always rejects the add. Flood resistant.
+     */
+    public static class RejectTrimmer<T extends SimpleDataStructure> implements KBucketTrimmer<T> {
+        public boolean trim(KBucket<T> kbucket, T toAdd) {
+            return false;
+        }
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder buf = new StringBuilder(1024);
+        buf.append("Bucket set rooted on: ").append(_us.toString())
+           .append(" K= ").append(BUCKET_SIZE)
+           .append(" B= ").append(B_VALUE)
+           .append(" with ").append(size())
+           .append(" keys in ").append(_buckets.size()).append(" buckets:\n");
+        getReadLock();
+        try {
+            int len = _buckets.size();
+            for (int i = 0; i < len; i++) {
+                KBucket<T> b = _buckets.get(i);
+                buf.append("* Bucket ").append(i).append("/").append(len).append(": ");
+                buf.append(b.toString()).append("\n");
+            }
+        } finally { releaseReadLock(); }
+        return buf.toString();
+    }
+}
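Reviewer note (not part of the patch): a sketch of how a DHT routing table might wire this up, using the K=8, B=1 values the constructor javadoc suggests for bittorrent. SHA1Hash is net.i2p.crypto.SHA1Hash; the byte-array constructor and the zero-argument constructor that makeKey() requires are assumptions about that class:

```java
import java.util.List;

import net.i2p.I2PAppContext;
import net.i2p.crypto.SHA1Hash;
import net.i2p.kademlia.KBucketSet;

class RoutingTableSketch {
    // myId, peerId, and target are hypothetical 20-byte SHA1 node IDs
    static void demo(SHA1Hash myId, SHA1Hash peerId, SHA1Hash target) {
        I2PAppContext ctx = I2PAppContext.getGlobalContext();
        // K=8 entries per bucket, B=1 (no extra sub-bucket splitting)
        KBucketSet<SHA1Hash> table = new KBucketSet<SHA1Hash>(ctx, myId, 8, 1);
        boolean added = table.add(peerId);  // false for ourselves, or if trimmed
        // up to 8 known node IDs sorted by XOR distance to target
        List<SHA1Hash> nearest = table.getClosest(target, 8);
        // one random in-range key per bucket idle for 15 minutes;
        // looking these up "refreshes" the stale buckets
        List<SHA1Hash> explore = table.getExploreKeys(15*60*1000);
    }
}
```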
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java b/apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java
new file mode 100644
index 0000000000..8d4b9972ae
--- /dev/null
+++ b/apps/i2psnark/java/src/net/i2p/kademlia/SelectionCollector.java
@@ -0,0 +1,10 @@
+package net.i2p.kademlia;
+
+import net.i2p.data.SimpleDataStructure;
+
+/**
+ * Visit kbuckets, gathering matches
+ */
+public interface SelectionCollector<T extends SimpleDataStructure> {
+    public void add(T entry);
+}
diff --git a/apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java b/apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java
new file mode 100644
index 0000000000..d11823f49b
--- /dev/null
+++ b/apps/i2psnark/java/src/net/i2p/kademlia/XORComparator.java
@@ -0,0 +1,27 @@
+package net.i2p.kademlia;
+
+import java.util.Comparator;
+
+import net.i2p.data.DataHelper;
+import net.i2p.data.SimpleDataStructure;
+
+/**
+ * Help sort Hashes in relation to a base key using the XOR metric
+ *
+ */
+class XORComparator<T extends SimpleDataStructure> implements Comparator<T> {
+    private final byte[] _base;
+
+    /**
+     * @param target key to compare distances with
+     */
+    public XORComparator(T target) {
+        _base = target.getData();
+    }
+
+    public int compare(T lhs, T rhs) {
+        byte lhsDelta[] = DataHelper.xor(lhs.getData(), _base);
+        byte rhsDelta[] = DataHelper.xor(rhs.getData(), _base);
+        return DataHelper.compareTo(lhsDelta, rhsDelta);
+    }
+}
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/NodeInfoComparator.java b/apps/i2psnark/java/src/org/klomp/snark/dht/NodeInfoComparator.java
index 66eb57bbab..9995dfe579 100644
--- a/apps/i2psnark/java/src/org/klomp/snark/dht/NodeInfoComparator.java
+++ b/apps/i2psnark/java/src/org/klomp/snark/dht/NodeInfoComparator.java
@@ -16,15 +16,15 @@ import net.i2p.data.DataHelper;
  * @author zzz
  */
 class NodeInfoComparator implements Comparator<NodeInfo> {
-    private final SHA1Hash _base;
+    private final byte[] _base;
 
     public NodeInfoComparator(SHA1Hash h) {
-        _base = h;
+        _base = h.getData();
     }
 
     public int compare(NodeInfo lhs, NodeInfo rhs) {
-        byte lhsDelta[] = DataHelper.xor(lhs.getNID().getData(), _base.getData());
-        byte rhsDelta[] = DataHelper.xor(rhs.getNID().getData(), _base.getData());
+        byte lhsDelta[] = DataHelper.xor(lhs.getNID().getData(), _base);
+        byte rhsDelta[] = DataHelper.xor(rhs.getNID().getData(), _base);
         return DataHelper.compareTo(lhsDelta, rhsDelta);
     }
 
diff --git a/apps/i2psnark/java/src/org/klomp/snark/dht/SHA1Comparator.java b/apps/i2psnark/java/src/org/klomp/snark/dht/SHA1Comparator.java
deleted file mode 100644
index 36c355e491..0000000000
--- a/apps/i2psnark/java/src/org/klomp/snark/dht/SHA1Comparator.java
+++ /dev/null
@@ -1,31 +0,0 @@
-package org.klomp.snark.dht;
-/*
- * From zzzot, modded and relicensed to GPLv2
- */
-
-import java.util.Comparator;
-
-import net.i2p.crypto.SHA1Hash;
-import net.i2p.data.DataHelper;
-
-/**
- * Closest to a InfoHash or NID key.
- * Use for InfoHashes and NIDs.
- *
- * @since 0.8.4
- * @author zzz
- */
-class SHA1Comparator implements Comparator<SHA1Hash> {
-    private final byte[] _base;
-
-    public SHA1Comparator(SHA1Hash h) {
-        _base = h.getData();
-    }
-
-    public int compare(SHA1Hash lhs, SHA1Hash rhs) {
-        byte lhsDelta[] = DataHelper.xor(lhs.getData(), _base);
-        byte rhsDelta[] = DataHelper.xor(rhs.getData(), _base);
-        return DataHelper.compareTo(lhsDelta, rhsDelta);
-    }
-
-}
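Reviewer note (not part of the patch): SelectionCollector is the visitor counterpart to getAll(). A hypothetical collector that gathers every stored key into a list without the intermediate HashSet copy that the no-argument getAll() builds:

```java
import java.util.ArrayList;
import java.util.List;

import net.i2p.crypto.SHA1Hash;
import net.i2p.kademlia.SelectionCollector;

// Hypothetical: pass an instance to KBucketSet.getAll(SelectionCollector)
// to visit every stored key, then read the accumulated matches list.
class ListCollector implements SelectionCollector<SHA1Hash> {
    final List<SHA1Hash> matches = new ArrayList<SHA1Hash>();

    public void add(SHA1Hash entry) {
        matches.add(entry);
    }
}
```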