propagate from branch 'i2p.i2p' (head 8066e0ff00b526c6971e77de44ff2d322f25069a)

to branch 'i2p.i2p.zzz.dhtsnark' (head f857dd921a7c806c85eb80419f4f9fdd3b6428a2)
zzz committed 2012-05-23 16:56:13 +00:00
14 changed files with 1962 additions and 0 deletions

org/klomp/snark/dht/DHTNodes.java View File

@@ -0,0 +1,110 @@
package org.klomp.snark.dht;
/*
* From zzzot, modded and relicensed to GPLv2
*/
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.I2PAppContext;
import net.i2p.crypto.SHA1Hash;
import net.i2p.data.DataHelper;
import net.i2p.util.Log;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
/**
* All the nodes we know about, stored as a mapping from
* node ID to a Destination and Port.
* Also uses the keySet as a substitute for kbuckets.
*
* Swap this out for a real DHT later.
*
* @since 0.8.4
* @author zzz
*/
public class DHTNodes extends ConcurrentHashMap<NID, NodeInfo> {
private final I2PAppContext _context;
private long _expireTime;
private final Log _log;
/** stagger with other cleaners */
private static final long CLEAN_TIME = 237*1000;
private static final long MAX_EXPIRE_TIME = 60*60*1000;
private static final long MIN_EXPIRE_TIME = 5*60*1000;
private static final long DELTA_EXPIRE_TIME = 7*60*1000;
private static final int MAX_PEERS = 9999;
public DHTNodes(I2PAppContext ctx) {
super();
_context = ctx;
_expireTime = MAX_EXPIRE_TIME;
_log = _context.logManager().getLog(DHTNodes.class);
SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
}
/**
* Fake DHT
* @param h either an InfoHash or a NID
*/
List<NodeInfo> findClosest(SHA1Hash h, int numWant) {
// sort the whole thing
Set<NID> all = new TreeSet<NID>(new SHA1Comparator(h));
all.addAll(keySet());
int sz = all.size();
int max = Math.min(numWant, sz);
// return the first ones
List<NodeInfo> rv = new ArrayList<NodeInfo>(max);
int count = 0;
for (NID nid : all) {
if (count++ >= max)
break;
NodeInfo nInfo = get(nid);
if (nInfo == null)
continue;
rv.add(nInfo);
}
return rv;
}
/**** used CHM methods to be replaced:
public Collection<NodeInfo> values() {}
public NodeInfo get(NID nID) {}
public NodeInfo putIfAbsent(NID nID, NodeInfo nInfo) {}
public int size() {}
****/
/** Periodically removes expired nodes and adapts the expire time to the table size */
private class Cleaner implements SimpleTimer.TimedEvent {
public void timeReached() {
long now = _context.clock().now();
int peerCount = 0;
for (Iterator<NodeInfo> iter = DHTNodes.this.values().iterator(); iter.hasNext(); ) {
NodeInfo peer = iter.next();
if (peer.lastSeen() < now - _expireTime)
iter.remove();
else
peerCount++;
}
if (peerCount > MAX_PEERS)
_expireTime = Math.max(_expireTime - DELTA_EXPIRE_TIME, MIN_EXPIRE_TIME);
else
_expireTime = Math.min(_expireTime + DELTA_EXPIRE_TIME, MAX_EXPIRE_TIME);
if (_log.shouldLog(Log.INFO))
_log.info("DHT storage cleaner done, now with " +
peerCount + " peers, " +
DataHelper.formatDuration(_expireTime) + " expiration");
}
}
}
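
Not part of the commit: a minimal usage sketch, assuming same-package access (findClosest is package-private) and a table already populated via put(nid, nodeInfo). The class name below is hypothetical; 8 is the usual K value in the bittorrent DHT.

package org.klomp.snark.dht;

import java.util.List;

// Hypothetical driver class, for illustration only.
class DHTNodesExample {
    /** the (up to) 8 known nodes whose IDs are XOR-closest to the target info hash */
    static List<NodeInfo> closestNodes(DHTNodes nodes, InfoHash target) {
        // DHTNodes is a ConcurrentHashMap<NID, NodeInfo>, so entries are added with
        //   nodes.put(nodeInfo.getNID(), nodeInfo);
        // findClosest() sorts the whole key set by XOR distance to the target
        // (via SHA1Comparator) and returns the first numWant matches.
        return nodes.findClosest(target, 8);
    }
}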

org/klomp/snark/dht/DHTTracker.java View File

@@ -0,0 +1,132 @@
package org.klomp.snark.dht;
/*
* From zzzot, relicensed to GPLv2
*/
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.Log;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
/**
* The tracker stores peers, i.e. Dest hashes (not nodes).
*
* @since 0.8.4
* @author zzz
*/
class DHTTracker {
private final I2PAppContext _context;
private final Torrents _torrents;
private long _expireTime;
private final Log _log;
/** stagger with other cleaners */
private static final long CLEAN_TIME = 199*1000;
/** make this longer than postman's tracker */
private static final long MAX_EXPIRE_TIME = 95*60*1000;
private static final long MIN_EXPIRE_TIME = 5*60*1000;
private static final long DELTA_EXPIRE_TIME = 7*60*1000;
private static final int MAX_PEERS = 9999;
DHTTracker(I2PAppContext ctx) {
_context = ctx;
_torrents = new Torrents();
_expireTime = MAX_EXPIRE_TIME;
_log = _context.logManager().getLog(DHTTracker.class);
SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
}
void stop() {
_torrents.clear();
// no way to stop the cleaner
}
void announce(InfoHash ih, Hash hash) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Announce " + hash + " for " + ih);
Peers peers = _torrents.get(ih);
if (peers == null) {
peers = new Peers();
Peers peers2 = _torrents.putIfAbsent(ih, peers);
if (peers2 != null)
peers = peers2;
}
Peer peer = new Peer(hash.getData());
Peer peer2 = peers.putIfAbsent(peer, peer);
if (peer2 != null)
peer = peer2;
peer.setLastSeen(_context.clock().now());
}
void unannounce(InfoHash ih, Hash hash) {
Peers peers = _torrents.get(ih);
if (peers == null)
return;
Peer peer = new Peer(hash.getData());
peers.remove(peer);
}
/**
* Caller's responsibility to remove its own hash from the list
* @return list or empty list (never null)
*/
List<Hash> getPeers(InfoHash ih, int max) {
Peers peers = _torrents.get(ih);
if (peers == null)
return Collections.emptyList();
int size = peers.size();
List<Hash> rv = new ArrayList<Hash>(peers.values());
if (max < size) {
Collections.shuffle(rv, _context.random());
rv = rv.subList(0, max);
}
return rv;
}
private class Cleaner implements SimpleTimer.TimedEvent {
public void timeReached() {
long now = _context.clock().now();
int torrentCount = 0;
int peerCount = 0;
for (Iterator<Peers> iter = _torrents.values().iterator(); iter.hasNext(); ) {
Peers p = iter.next();
int recent = 0;
for (Iterator<Peer> iterp = p.values().iterator(); iterp.hasNext(); ) {
Peer peer = iterp.next();
if (peer.lastSeen() < now - _expireTime)
iterp.remove();
else {
recent++;
peerCount++;
}
}
if (recent <= 0)
iter.remove();
else
torrentCount++;
}
if (peerCount > MAX_PEERS)
_expireTime = Math.max(_expireTime - DELTA_EXPIRE_TIME, MIN_EXPIRE_TIME);
else
_expireTime = Math.min(_expireTime + DELTA_EXPIRE_TIME, MAX_EXPIRE_TIME);
if (_log.shouldLog(Log.INFO))
_log.info("DHT tracker cleaner done, now with " +
torrentCount + " torrents, " +
peerCount + " peers, " +
DataHelper.formatDuration(_expireTime) + " expiration");
}
}
}
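
Not part of the commit: a sketch of the tracker's announce/getPeers cycle from same-package code. It shows only the storage side; the wire handling lives in the KRPC class (diff suppressed below). The class name and the peer count of 50 are hypothetical.

package org.klomp.snark.dht;

import java.util.List;
import net.i2p.I2PAppContext;
import net.i2p.data.Hash;

// Hypothetical driver class, for illustration only.
class DHTTrackerExample {
    static List<Hash> announceAndFetch(I2PAppContext ctx, InfoHash ih, Hash myDestHash) {
        DHTTracker tracker = new DHTTracker(ctx);
        // register our own destination hash as a peer for this torrent
        tracker.announce(ih, myDestHash);
        // ask for up to 50 peers; per the getPeers() javadoc the caller
        // is responsible for removing its own hash from the result
        return tracker.getPeers(ih, 50);
    }
}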

org/klomp/snark/dht/InfoHash.java View File

@@ -0,0 +1,19 @@
package org.klomp.snark.dht;
/*
* From zzzot, modded and relicensed to GPLv2
*/
import net.i2p.crypto.SHA1Hash;
/**
* A 20-byte SHA1 info hash
*
* @since 0.8.4
* @author zzz
*/
public class InfoHash extends SHA1Hash {
public InfoHash(byte[] data) {
super(data);
}
}
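
Not part of the commit: for context, a torrent's info hash is the SHA-1 digest of its bencoded info dictionary. A minimal sketch using the JDK digest API, with the bencoded bytes supplied by the caller; the helper class is hypothetical.

package org.klomp.snark.dht;

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Hypothetical helper, for illustration only.
class InfoHashExample {
    /** SHA-1 of the raw bencoded "info" dictionary gives the 20-byte info hash */
    static InfoHash fromBencodedInfo(byte[] bencodedInfo) throws NoSuchAlgorithmException {
        MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
        return new InfoHash(sha1.digest(bencodedInfo));
    }
}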

File diff suppressed because it is too large

org/klomp/snark/dht/MsgID.java View File

@@ -0,0 +1,32 @@
package org.klomp.snark.dht;
/*
* GPLv2
*/
import net.i2p.I2PAppContext;
import net.i2p.data.ByteArray;
/**
* Used for both incoming and outgoing message IDs
*
* @since 0.8.4
* @author zzz
*/
public class MsgID extends ByteArray {
private static final int MY_TOK_LEN = 8;
/** outgoing - generate a random ID */
public MsgID(I2PAppContext ctx) {
super(null);
byte[] data = new byte[MY_TOK_LEN];
ctx.random().nextBytes(data);
setData(data);
setValid(MY_TOK_LEN);
}
/** incoming - save the ID (arbitrary length) */
public MsgID(byte[] data) {
super(data);
}
}

org/klomp/snark/dht/NID.java View File

@@ -0,0 +1,19 @@
package org.klomp.snark.dht;
/*
* From zzzot, modded and relicensed to GPLv2
*/
import net.i2p.crypto.SHA1Hash;
/**
* A 20-byte node ID, used as a Map key in lots of places.
*
* @since 0.8.4
* @author zzz
*/
public class NID extends SHA1Hash {
public NID(byte[] data) {
super(data);
}
}

org/klomp/snark/dht/NodeInfo.java View File

@@ -0,0 +1,181 @@
package org.klomp.snark.dht;
/*
* From zzzot, modded and relicensed to GPLv2
*/
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.SimpleDataStructure;
/**
* A Node ID, Hash, and port, and an optional Destination.
* This is what DHTNodes remembers. The DHT tracker just stores Hashes.
* getData() returns the 54 byte compact info (NID, Hash, port).
*
* Things are a little tricky in KRPC since we exchange Hashes and don't
* always have the Destination.
*
* @since 0.8.4
* @author zzz
*/
public class NodeInfo extends SimpleDataStructure {
private long lastSeen;
private NID nID;
private Hash hash;
private Destination dest;
private int port;
public static final int LENGTH = NID.HASH_LENGTH + Hash.HASH_LENGTH + 2;
/**
* Use this if we have the full destination
* @throws IllegalArgumentException
*/
public NodeInfo(NID nID, Destination dest, int port) {
super();
this.nID = nID;
this.dest = dest;
this.hash = dest.calculateHash();
this.port = port;
initialize(nID, this.hash, port);
}
/**
* No Destination yet available
* @deprecated unused
* @throws IllegalArgumentException
*/
public NodeInfo(NID nID, Hash hash, int port) {
super();
this.nID = nID;
this.hash = hash;
this.port = port;
initialize(nID, hash, port);
}
/**
* No Destination yet available
* @param compactInfo 20 byte node ID, 32 byte destHash, 2 byte port
* @throws IllegalArgumentException
*/
public NodeInfo(byte[] compactInfo) {
super(compactInfo);
initialize(compactInfo);
}
/**
* No Destination yet available
* @param compactInfo 20 byte node ID, 32 byte destHash, 2 byte port
* @param offset starting at this offset in compactInfo
* @throws IllegalArgumentException
* @throws ArrayIndexOutOfBoundsException
*/
public NodeInfo(byte[] compactInfo, int offset) {
super();
byte[] d = new byte[LENGTH];
System.arraycopy(compactInfo, offset, d, 0, LENGTH);
setData(d);
initialize(d);
}
/**
* Creates data structures from the compact info
* @throws IllegalArgumentException
*/
private void initialize(byte[] compactInfo) {
if (compactInfo.length != LENGTH)
throw new IllegalArgumentException("Bad compact info length");
byte[] ndata = new byte[NID.HASH_LENGTH];
System.arraycopy(compactInfo, 0, ndata, 0, NID.HASH_LENGTH);
this.nID = new NID(ndata);
byte[] hdata = new byte[Hash.HASH_LENGTH];
System.arraycopy(compactInfo, NID.HASH_LENGTH, hdata, 0, Hash.HASH_LENGTH);
this.hash = new Hash(hdata);
this.port = (int) DataHelper.fromLong(compactInfo, NID.HASH_LENGTH + Hash.HASH_LENGTH, 2);
}
/**
* Creates 54-byte compact info
* @throws IllegalArgumentException
*/
private void initialize(NID nID, Hash hash, int port) {
if (port < 0 || port > 65535)
throw new IllegalArgumentException("Bad port");
byte[] compactInfo = new byte[LENGTH];
System.arraycopy(nID.getData(), 0, compactInfo, 0, NID.HASH_LENGTH);
System.arraycopy(hash.getData(), 0, compactInfo, NID.HASH_LENGTH, Hash.HASH_LENGTH);
DataHelper.toLong(compactInfo, NID.HASH_LENGTH + Hash.HASH_LENGTH, 2, port);
setData(compactInfo);
}
public int length() {
return LENGTH;
}
public NID getNID() {
return this.nID;
}
/** @return may be null if we don't have it */
public Destination getDestination() {
return this.dest;
}
public Hash getHash() {
return this.hash;
}
@Override
public Hash calculateHash() {
return this.hash;
}
/**
* This can come in later but the hash must match.
* @throws IllegalArgumentException if hash of dest doesn't match previous hash
*/
public void setDestination(Destination dest) throws IllegalArgumentException {
if (!dest.calculateHash().equals(this.hash))
throw new IllegalArgumentException("Hash mismatch, was: " + this.hash + " new: " + dest.calculateHash());
if (this.dest == null)
this.dest = dest;
else if (!this.dest.equals(dest))
throw new IllegalArgumentException("Dest mismatch, was: " + this.dest+ " new: " + dest);
// else keep the old to reduce object churn
}
public int getPort() {
return this.port;
}
public long lastSeen() {
return lastSeen;
}
public void setLastSeen(long now) {
lastSeen = now;
}
@Override
public int hashCode() {
return super.hashCode() ^ nID.hashCode() ^ port;
}
@Override
public boolean equals(Object o) {
try {
NodeInfo ni = (NodeInfo) o;
// assume dest matches, ignore it
return this.hash.equals(ni.hash) && nID.equals(ni.nID) && port == ni.port;
} catch (Exception e) {
return false;
}
}
public String toString() {
return "NodeInfo: " + nID + ' ' + hash + " port: " + port;
}
}
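
Not part of the commit: a sketch of the 54-byte compact-info round trip described above (20-byte node ID, 32-byte destination hash, 2-byte big-endian port). The helper class is hypothetical.

package org.klomp.snark.dht;

import net.i2p.data.Destination;

// Hypothetical round-trip example, for illustration only.
class NodeInfoExample {
    static NodeInfo roundTrip(NID nid, Destination dest, int port) {
        NodeInfo ni = new NodeInfo(nid, dest, port);
        // getData() is the 54-byte compact form:
        //   bytes  0..19  node ID
        //   bytes 20..51  SHA-256 hash of the Destination
        //   bytes 52..53  port, big-endian
        byte[] compact = ni.getData();
        // parsing the compact form recovers the NID, hash and port but not
        // the Destination, which can be attached later if the hashes match
        NodeInfo parsed = new NodeInfo(compact);
        parsed.setDestination(dest);
        return parsed;
    }
}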

org/klomp/snark/dht/NodeInfoComparator.java View File

@@ -0,0 +1,31 @@
package org.klomp.snark.dht;
/*
* From zzzot, modded and relicensed to GPLv2
*/
import java.util.Comparator;
import net.i2p.crypto.SHA1Hash;
import net.i2p.data.DataHelper;
/**
* Closest to an InfoHash or NID key.
* Use for NodeInfos.
*
* @since 0.8.4
* @author zzz
*/
class NodeInfoComparator implements Comparator<NodeInfo> {
private final SHA1Hash _base;
public NodeInfoComparator(SHA1Hash h) {
_base = h;
}
public int compare(NodeInfo lhs, NodeInfo rhs) {
byte lhsDelta[] = DataHelper.xor(lhs.getNID().getData(), _base.getData());
byte rhsDelta[] = DataHelper.xor(rhs.getNID().getData(), _base.getData());
return DataHelper.compareTo(lhsDelta, rhsDelta);
}
}

org/klomp/snark/dht/Peer.java View File

@@ -0,0 +1,30 @@
package org.klomp.snark.dht;
/*
* From zzzot, modded and relicensed to GPLv2
*/
import net.i2p.data.Hash;
/**
* A single peer for a single torrent.
* This is what the DHT tracker remembers.
*
* @since 0.8.4
* @author zzz
*/
public class Peer extends Hash {
private long lastSeen;
public Peer(byte[] data) {
super(data);
}
public long lastSeen() {
return lastSeen;
}
public void setLastSeen(long now) {
lastSeen = now;
}
}

org/klomp/snark/dht/Peers.java View File

@@ -0,0 +1,21 @@
package org.klomp.snark.dht;
/*
* From zzzot, modded and relicensed to GPLv2
*/
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.Hash;
/**
* All the peers for a single torrent
*
* @since 0.8.4
* @author zzz
*/
public class Peers extends ConcurrentHashMap<Hash, Peer> {
public Peers() {
super();
}
}

org/klomp/snark/dht/SHA1Comparator.java View File

@@ -0,0 +1,31 @@
package org.klomp.snark.dht;
/*
* From zzzot, modded and relicensed to GPLv2
*/
import java.util.Comparator;
import net.i2p.crypto.SHA1Hash;
import net.i2p.data.DataHelper;
/**
* Closest to an InfoHash or NID key.
* Use for InfoHashes and NIDs.
*
* @since 0.8.4
* @author zzz
*/
class SHA1Comparator implements Comparator<SHA1Hash> {
private final byte[] _base;
public SHA1Comparator(SHA1Hash h) {
_base = h.getData();
}
public int compare(SHA1Hash lhs, SHA1Hash rhs) {
byte lhsDelta[] = DataHelper.xor(lhs.getData(), _base);
byte rhsDelta[] = DataHelper.xor(rhs.getData(), _base);
return DataHelper.compareTo(lhsDelta, rhsDelta);
}
}
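
Not part of the commit: the comparator orders hashes by the unsigned byte-wise value of their XOR with the base key, so IDs sharing a longer common prefix with the key sort first. A same-package sketch of the pattern DHTNodes.findClosest() uses; the class name is hypothetical.

package org.klomp.snark.dht;

import java.util.Collection;
import java.util.TreeSet;
import net.i2p.crypto.SHA1Hash;

// Hypothetical demonstration, for illustration only.
class SHA1ComparatorExample {
    /** the node ID XOR-closest to the key, or null if there are no candidates */
    static NID closest(SHA1Hash key, Collection<NID> candidates) {
        TreeSet<NID> sorted = new TreeSet<NID>(new SHA1Comparator(key));
        sorted.addAll(candidates);
        return sorted.isEmpty() ? null : sorted.first();
    }
}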

org/klomp/snark/dht/Token.java View File

@@ -0,0 +1,39 @@
package org.klomp.snark.dht;
/*
* GPLv2
*/
import net.i2p.I2PAppContext;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
/**
* Used for both outgoing and incoming tokens
*
* @since 0.8.4
* @author zzz
*/
public class Token extends ByteArray {
private static final int MY_TOK_LEN = 8;
private final long lastSeen;
/** outgoing - generate a random token */
public Token(I2PAppContext ctx) {
super(null);
byte[] data = new byte[MY_TOK_LEN];
ctx.random().nextBytes(data);
setData(data);
lastSeen = ctx.clock().now();
}
/** incoming - save the token (arbitrary length) */
public Token(I2PAppContext ctx, byte[] data) {
super(data);
lastSeen = ctx.clock().now();
}
public long lastSeen() {
return lastSeen;
}
}

org/klomp/snark/dht/TokenKey.java View File

@@ -0,0 +1,20 @@
package org.klomp.snark.dht;
/*
* GPLv2
*/
import net.i2p.crypto.SHA1Hash;
import net.i2p.data.DataHelper;
/**
* Used to index incoming Tokens
*
* @since 0.8.4
* @author zzz
*/
public class TokenKey extends SHA1Hash {
public TokenKey(NID nID, InfoHash ih) {
super(DataHelper.xor(nID.getData(), ih.getData()));
}
}
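
Not part of the commit: a sketch of how Token and TokenKey might fit together (the real bookkeeping is in the KRPC class, whose diff is suppressed above). A token received in a get_peers reply is remembered under a TokenKey built from the replying node's ID and the info hash, so it can be echoed back in a later announce to that node. The class and field names below are hypothetical.

package org.klomp.snark.dht;

import java.util.concurrent.ConcurrentHashMap;
import net.i2p.I2PAppContext;

// Hypothetical cache, for illustration only.
class TokenCacheExample {
    private final ConcurrentHashMap<TokenKey, Token> incoming =
        new ConcurrentHashMap<TokenKey, Token>();

    /** remember a token received in a get_peers reply from node nid for torrent ih */
    void store(I2PAppContext ctx, NID nid, InfoHash ih, byte[] tokenBytes) {
        incoming.put(new TokenKey(nid, ih), new Token(ctx, tokenBytes));
    }

    /** the token to echo back when announcing torrent ih to node nid, or null if unknown */
    Token lookup(NID nid, InfoHash ih) {
        return incoming.get(new TokenKey(nid, ih));
    }
}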

org/klomp/snark/dht/Torrents.java View File

@@ -0,0 +1,19 @@
package org.klomp.snark.dht;
/*
* From zzzot, relicensed to GPLv2
*/
import java.util.concurrent.ConcurrentHashMap;
/**
* All the torrents
*
* @since 0.8.4
* @author zzz
*/
public class Torrents extends ConcurrentHashMap<InfoHash, Peers> {
public Torrents() {
super();
}
}