i2psnark: type arguments, unused imports

str4d
2013-11-21 12:43:45 +00:00
parent 2f4765665d
commit c32b451733
27 changed files with 158 additions and 184 deletions
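
The whole commit follows one pattern: raw collection and iterator types are replaced with explicitly parameterized ones (type arguments written out in full on both sides rather than with the Java 7 diamond, matching the style used throughout the diff), unchecked Collections constants give way to the typed factory methods, and imports the files no longer use are dropped. A minimal sketch of the core before/after, not taken from any of the changed files:

import java.util.ArrayList;
import java.util.List;

class RawVsGeneric {
    // Before: raw constructor call; assigning it to a parameterized field
    // compiles only with an unchecked-conversion warning.
    List<String> before = new ArrayList();

    // After: explicit type argument; reads are typed and the warning goes away.
    List<String> after = new ArrayList<String>();
}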

View File

@ -53,7 +53,7 @@ class KBucketImpl<T extends SimpleDataStructure> implements KBucket<T> {
/** include if no bits higher than this bit (inclusive) are set */
private final int _end;
private final int _max;
private final KBucketTrimmer _trimmer;
private final KBucketTrimmer<T> _trimmer;
/** when did we last shake things up */
private long _lastChanged;
private final I2PAppContext _context;
@ -62,11 +62,11 @@ class KBucketImpl<T extends SimpleDataStructure> implements KBucket<T> {
* All entries in this bucket will have at least one bit different
* from us in the range [begin, end] inclusive.
*/
public KBucketImpl(I2PAppContext context, int begin, int end, int max, KBucketTrimmer trimmer) {
public KBucketImpl(I2PAppContext context, int begin, int end, int max, KBucketTrimmer<T> trimmer) {
if (begin > end)
throw new IllegalArgumentException(begin + " > " + end);
_context = context;
_entries = new ConcurrentHashSet(max + 4);
_entries = new ConcurrentHashSet<T>(max + 4);
_begin = begin;
_end = end;
_max = max;
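
Parameterizing the trimmer field and the constructor argument, as in the hunks above, lets KBucketImpl's own type parameter T flow through to its collaborators; the raw version compiled but would have accepted a trimmer built for a different key type. A condensed sketch of the shape — Trimmer and Bucket are hypothetical stand-ins, and the JDK newSetFromMap call stands in for i2p's ConcurrentHashSet:

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

interface Trimmer<T> { boolean trim(Set<T> entries, T toAdd); }

class Bucket<T> {
    private final Set<T> entries;
    private final Trimmer<T> trimmer;

    Bucket(int max, Trimmer<T> trimmer) {
        // JDK-only stand-in for i2p's new ConcurrentHashSet<T>(max + 4)
        this.entries = Collections.newSetFromMap(new ConcurrentHashMap<T, Boolean>(max + 4));
        this.trimmer = trimmer;
    }
}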

View File

@ -50,9 +50,9 @@ public class KBucketSet<T extends SimpleDataStructure> {
*
* Closest values are in bucket 0, furthest are in the last bucket.
*/
private final List<KBucket> _buckets;
private final List<KBucket<T>> _buckets;
private final Range<T> _rangeCalc;
private final KBucketTrimmer _trimmer;
private final KBucketTrimmer<T> _trimmer;
/**
* Locked for reading only when traversing all the buckets.
@ -76,13 +76,13 @@ public class KBucketSet<T extends SimpleDataStructure> {
* b > 0, use 1 for bittorrent, Kademlia paper recommends 5
*/
public KBucketSet(I2PAppContext context, T us, int max, int b) {
this(context, us, max, b, new RandomTrimmer(context, max));
this(context, us, max, b, new RandomTrimmer<T>(context, max));
}
/**
* Use the supplied trim strategy.
*/
public KBucketSet(I2PAppContext context, T us, int max, int b, KBucketTrimmer trimmer) {
public KBucketSet(I2PAppContext context, T us, int max, int b, KBucketTrimmer<T> trimmer) {
_us = us;
_context = context;
_log = context.logManager().getLog(KBucketSet.class);
@ -95,7 +95,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
NUM_BUCKETS = KEYSIZE_BITS * B_FACTOR;
BUCKET_SIZE = max;
_buckets = createBuckets();
_rangeCalc = new Range(us, B_VALUE);
_rangeCalc = new Range<T>(us, B_VALUE);
// this verifies the zero-argument constructor
makeKey(new byte[us.length()]);
}
@ -137,7 +137,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
*
*/
public boolean add(T peer) {
KBucket bucket;
KBucket<T> bucket;
getReadLock();
try {
bucket = getBucket(peer);
@ -170,7 +170,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
* FIXME will split the closest buckets too far if B > 1 and K < 2**B
* Won't ever really happen and if it does it still works.
*/
private boolean shouldSplit(KBucket b) {
private boolean shouldSplit(KBucket<T> b) {
return
b.getRangeBegin() != b.getRangeEnd() &&
b.getKeyCount() > BUCKET_SIZE;
@ -263,7 +263,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
int rv = 0;
getReadLock();
try {
for (KBucket b : _buckets) {
for (KBucket<T> b : _buckets) {
rv += b.getKeyCount();
}
} finally { releaseReadLock(); }
@ -271,7 +271,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
}
public boolean remove(T entry) {
KBucket kbucket;
KBucket<T> kbucket;
getReadLock();
try {
kbucket = getBucket(entry);
@ -284,7 +284,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
public void clear() {
getReadLock();
try {
for (KBucket b : _buckets) {
for (KBucket<T> b : _buckets) {
b.clear();
}
} finally { releaseReadLock(); }
@ -295,10 +295,10 @@ public class KBucketSet<T extends SimpleDataStructure> {
* @return a copy in a new set
*/
public Set<T> getAll() {
Set<T> all = new HashSet(256);
Set<T> all = new HashSet<T>(256);
getReadLock();
try {
for (KBucket b : _buckets) {
for (KBucket<T> b : _buckets) {
all.addAll(b.getEntries());
}
} finally { releaseReadLock(); }
@ -317,7 +317,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
public void getAll(SelectionCollector<T> collector) {
getReadLock();
try {
for (KBucket b : _buckets) {
for (KBucket<T> b : _buckets) {
b.getEntries(collector);
}
} finally { releaseReadLock(); }
@ -329,7 +329,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
* @return non-null, closest first
*/
public List<T> getClosest(int max) {
return getClosest(max, Collections.EMPTY_SET);
return getClosest(max, Collections.<T> emptySet());
}
/**
@ -338,7 +338,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
* @return non-null, closest first
*/
public List<T> getClosest(int max, Collection<T> toIgnore) {
List<T> rv = new ArrayList(max);
List<T> rv = new ArrayList<T>(max);
int count = 0;
getReadLock();
try {
@ -355,7 +355,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
}
}
} finally { releaseReadLock(); }
Comparator comp = new XORComparator(_us);
Comparator<T> comp = new XORComparator<T>(_us);
Collections.sort(rv, comp);
int sz = rv.size();
for (int i = sz - 1; i >= max; i--) {
@ -370,7 +370,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
* @return non-null, closest first
*/
public List<T> getClosest(T key, int max) {
return getClosest(key, max, Collections.EMPTY_SET);
return getClosest(key, max, Collections.<T> emptySet());
}
/**
@ -381,7 +381,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
public List<T> getClosest(T key, int max, Collection<T> toIgnore) {
if (key.equals(_us))
return getClosest(max, toIgnore);
List<T> rv = new ArrayList(max);
List<T> rv = new ArrayList<T>(max);
int count = 0;
getReadLock();
try {
@ -407,7 +407,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
}
}
} finally { releaseReadLock(); }
Comparator comp = new XORComparator(key);
Comparator<T> comp = new XORComparator<T>(key);
Collections.sort(rv, comp);
int sz = rv.size();
for (int i = sz - 1; i >= max; i--) {
@ -452,7 +452,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
List<KBucket<T>> getBuckets() {
getReadLock();
try {
return new ArrayList(_buckets);
return new ArrayList<KBucket<T>>(_buckets);
} finally { releaseReadLock(); }
}
@ -461,7 +461,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
* Caller must hold read lock
* @return null if key is us
*/
private KBucket getBucket(T key) {
private KBucket<T> getBucket(T key) {
int bucket = pickBucket(key);
if (bucket < 0)
return null;
@ -480,30 +480,30 @@ public class KBucketSet<T extends SimpleDataStructure> {
// of equal size to be checked so a binary search is better
if (B_VALUE <= 3) {
for (int i = _buckets.size() - 1; i >= 0; i--) {
KBucket b = _buckets.get(i);
KBucket<T> b = _buckets.get(i);
if (range >= b.getRangeBegin() && range <= b.getRangeEnd())
return i;
}
return -1;
} else {
KBucket dummy = new DummyBucket(range);
return Collections.binarySearch(_buckets, dummy, new BucketComparator());
KBucket<T> dummy = new DummyBucket<T>(range);
return Collections.binarySearch(_buckets, dummy, new BucketComparator<T>());
}
}
private List<KBucket> createBuckets() {
private List<KBucket<T>> createBuckets() {
// just an initial size
List<KBucket> buckets = new ArrayList(4 * B_FACTOR);
List<KBucket<T>> buckets = new ArrayList<KBucket<T>>(4 * B_FACTOR);
buckets.add(createBucket(0, NUM_BUCKETS -1));
return buckets;
}
private KBucket createBucket(int start, int end) {
private KBucket<T> createBucket(int start, int end) {
if (end - start >= B_FACTOR &&
(((end + 1) & B_FACTOR - 1) != 0 ||
(start & B_FACTOR - 1) != 0))
throw new IllegalArgumentException("Sub-bkt crosses K-bkt boundary: " + start + '-' + end);
KBucket bucket = new KBucketImpl(_context, start, end, BUCKET_SIZE, _trimmer);
KBucket<T> bucket = new KBucketImpl<T>(_context, start, end, BUCKET_SIZE, _trimmer);
return bucket;
}
@ -524,11 +524,11 @@ public class KBucketSet<T extends SimpleDataStructure> {
* @return non-null, closest first
*/
public List<T> getExploreKeys(long age) {
List<T> rv = new ArrayList(_buckets.size());
List<T> rv = new ArrayList<T>(_buckets.size());
long old = _context.clock().now() - age;
getReadLock();
try {
for (KBucket b : _buckets) {
for (KBucket<T> b : _buckets) {
int curSize = b.getKeyCount();
// Always explore the closest bucket
if ((b.getRangeBegin() == 0) ||
@ -543,7 +543,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
* Generate a random key to go within this bucket
* Package private for testing only. Others shouldn't need this.
*/
T generateRandomKey(KBucket bucket) {
T generateRandomKey(KBucket<T> bucket) {
int begin = bucket.getRangeBegin();
int end = bucket.getRangeEnd();
// number of fixed bits, out of B_VALUE - 1 bits
@ -662,7 +662,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
public Range(T us, int bValue) {
_bValue = bValue;
_bigUs = new BigInteger(1, us.getData());
_distanceCache = new LHMCache(256);
_distanceCache = new LHMCache<T, Integer>(256);
}
/** @return 0 to max-1 or -1 for us */
@ -748,8 +748,8 @@ public class KBucketSet<T extends SimpleDataStructure> {
* For Collections.binarySearch.
* Returns equal for any overlap.
*/
private static class BucketComparator implements Comparator<KBucket> {
public int compare(KBucket l, KBucket r) {
private static class BucketComparator<T extends SimpleDataStructure> implements Comparator<KBucket<T>> {
public int compare(KBucket<T> l, KBucket<T> r) {
if (l.getRangeEnd() < r.getRangeBegin())
return -1;
if (l.getRangeBegin() > r.getRangeEnd())
@ -770,7 +770,7 @@ public class KBucketSet<T extends SimpleDataStructure> {
try {
int len = _buckets.size();
for (int i = 0; i < len; i++) {
KBucket b = _buckets.get(i);
KBucket<T> b = _buckets.get(i);
buf.append("* Bucket ").append(i).append("/").append(len).append(": ");
buf.append(b.toString()).append("\n");
}
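
With BucketComparator generified, Collections.binarySearch(_buckets, dummy, new BucketComparator<T>()) type-checks against List<KBucket<T>> instead of relying on raw types. A self-contained sketch of the same shape, with Span and SpanComparator as hypothetical stand-ins for KBucket and BucketComparator:

import java.util.Comparator;

interface Span<T> {
    int getRangeBegin();
    int getRangeEnd();
}

class SpanComparator<T> implements Comparator<Span<T>> {
    // Any overlap counts as equal, as in the class above.
    public int compare(Span<T> l, Span<T> r) {
        if (l.getRangeEnd() < r.getRangeBegin())
            return -1;
        if (l.getRangeBegin() > r.getRangeEnd())
            return 1;
        return 0;
    }
}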

View File

@ -20,7 +20,7 @@ public class RandomTrimmer<T extends SimpleDataStructure> implements KBucketTrim
}
public boolean trim(KBucket<T> kbucket, T toAdd) {
List<T> e = new ArrayList(kbucket.getEntries());
List<T> e = new ArrayList<T>(kbucket.getEntries());
int sz = e.size();
// concurrency
if (sz < _max)

View File

@ -4,7 +4,6 @@
*/
package org.klomp.snark;
import java.util.Arrays;
import java.util.Properties;
import net.i2p.client.I2PSessionException;

View File

@ -45,7 +45,7 @@ class ConnectionAcceptor implements Runnable
private PeerAcceptor peeracceptor;
private Thread thread;
private final I2PSnarkUtil _util;
private final ObjectCounter<Hash> _badCounter = new ObjectCounter();
private final ObjectCounter<Hash> _badCounter = new ObjectCounter<Hash>();
private final SimpleTimer2.TimedEvent _cleaner;
private volatile boolean stop;

View File

@ -43,8 +43,8 @@ abstract class ExtensionHandler {
* @return bencoded outgoing handshake message
*/
public static byte[] getHandshake(int metasize, boolean pexAndMetadata, boolean dht) {
Map<String, Object> handshake = new HashMap();
Map<String, Integer> m = new HashMap();
Map<String, Object> handshake = new HashMap<String, Object>();
Map<String, Integer> m = new HashMap<String, Integer>();
if (pexAndMetadata) {
m.put(TYPE_METADATA, Integer.valueOf(ID_METADATA));
m.put(TYPE_PEX, Integer.valueOf(ID_PEX));
@ -276,7 +276,7 @@ abstract class ExtensionHandler {
/** REQUEST and REJECT are the same except for message type */
private static void sendMessage(Peer peer, int type, int piece) {
Map<String, Object> map = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
map.put("msg_type", Integer.valueOf(type));
map.put("piece", Integer.valueOf(piece));
byte[] payload = BEncoder.bencode(map);
@ -291,7 +291,7 @@ abstract class ExtensionHandler {
}
private static void sendPiece(Peer peer, int piece, byte[] data) {
Map<String, Object> map = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
map.put("msg_type", Integer.valueOf(TYPE_DATA));
map.put("piece", Integer.valueOf(piece));
map.put("total_size", Integer.valueOf(data.length));
@ -334,7 +334,7 @@ abstract class ExtensionHandler {
if (ids.length < HASH_LENGTH)
return;
int len = Math.min(ids.length, (I2PSnarkUtil.MAX_CONNECTIONS - 1) * HASH_LENGTH);
List<PeerID> peers = new ArrayList(len / HASH_LENGTH);
List<PeerID> peers = new ArrayList<PeerID>(len / HASH_LENGTH);
for (int off = 0; off < len; off += HASH_LENGTH) {
byte[] hash = new byte[HASH_LENGTH];
System.arraycopy(ids, off, hash, 0, HASH_LENGTH);
@ -382,7 +382,7 @@ abstract class ExtensionHandler {
public static void sendPEX(Peer peer, List<Peer> pList) {
if (pList.isEmpty())
return;
Map<String, Object> map = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
byte[] peers = new byte[HASH_LENGTH * pList.size()];
int off = 0;
for (Peer p : pList) {
@ -406,7 +406,7 @@ abstract class ExtensionHandler {
* @since DHT
*/
public static void sendDHT(Peer peer, int qport, int rport) {
Map<String, Object> map = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
map.put("port", Integer.valueOf(qport));
map.put("rport", Integer.valueOf(rport));
byte[] payload = BEncoder.bencode(map);

View File

@ -3,7 +3,6 @@ package org.klomp.snark;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
@ -11,8 +10,6 @@ import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.StringTokenizer;
import net.i2p.I2PAppContext;
import net.i2p.I2PException;
import net.i2p.client.I2PSession;
@ -33,7 +30,6 @@ import net.i2p.util.FileUtil;
import net.i2p.util.Log;
import net.i2p.util.SecureDirectory;
import net.i2p.util.SecureFile;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
import net.i2p.util.Translate;
@ -94,10 +90,10 @@ public class I2PSnarkUtil {
_context = ctx;
_log = _context.logManager().getLog(Snark.class);
_baseName = baseName;
_opts = new HashMap();
_opts = new HashMap<String, String>();
//setProxy("127.0.0.1", 4444);
setI2CPConfig("127.0.0.1", 7654, null);
_banlist = new ConcurrentHashSet();
_banlist = new ConcurrentHashSet<Hash>();
_maxUploaders = Snark.MAX_TOTAL_UPLOADERS;
_maxUpBW = DEFAULT_MAX_UP_BW;
_maxConnections = MAX_CONNECTIONS;
@ -220,8 +216,8 @@ public class I2PSnarkUtil {
_log.debug("Connecting to I2P", new Exception("I did it"));
Properties opts = _context.getProperties();
if (_opts != null) {
for (Iterator iter = _opts.keySet().iterator(); iter.hasNext(); ) {
String key = (String)iter.next();
for (Iterator<String> iter = _opts.keySet().iterator(); iter.hasNext(); ) {
String key = iter.next();
opts.setProperty(key, _opts.get(key).toString());
}
}
@ -577,7 +573,7 @@ public class I2PSnarkUtil {
*/
public List<String> getOpenTrackers() {
if (!shouldUseOpenTrackers())
return Collections.EMPTY_LIST;
return Collections.emptyList();
return _openTrackers;
}
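
Typing the iterator over _opts.keySet() removes the explicit (String) cast inside the loop. A simplified, hypothetical version of that loop — the class, field, and method names here are illustrative only:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;

class OptionCopier {
    private final Map<String, String> options = new HashMap<String, String>();

    void copyInto(Properties props) {
        // Iterator<String> means next() already returns String; the raw
        // version needed (String) iter.next() on every pass.
        for (Iterator<String> iter = options.keySet().iterator(); iter.hasNext(); ) {
            String key = iter.next();
            props.setProperty(key, options.get(key));
        }
    }
}

An enhanced for loop over entrySet() would also avoid the second map lookup, but the commit keeps the original loop shape and only types it.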

View File

@ -6,7 +6,6 @@ import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.util.RandomSource;
@ -190,7 +189,7 @@ class MagnetState {
*/
public MetaInfo buildMetaInfo() throws Exception {
// top map has nothing in it but the info map (no announce)
Map<String, BEValue> map = new HashMap();
Map<String, BEValue> map = new HashMap<String, BEValue>();
InputStream is = new ByteArrayInputStream(metainfoBytes);
BDecoder dec = new BDecoder(is);
BEValue bev = dec.bdecodeMap();

View File

@ -133,7 +133,7 @@ public class MagnetURI {
}
if (idx < 0 || idx > uri.length())
return null;
List<String> rv = new ArrayList();
List<String> rv = new ArrayList<String>();
while (true) {
String p = uri.substring(idx);
uri = p;

View File

@ -26,8 +26,6 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.SequenceInputStream;
import java.util.Iterator;
import net.i2p.I2PAppContext;
import net.i2p.client.streaming.I2PSocket;
import net.i2p.data.Base64;

View File

@ -24,7 +24,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.util.Log;
@ -73,7 +72,7 @@ class PeerCheckerTask implements Runnable
// Keep track of peers we remove now,
// we will add them back to the end of the list.
List<Peer> removed = new ArrayList();
List<Peer> removed = new ArrayList<Peer>();
int uploadLimit = coordinator.allowedUploaders();
boolean overBWLimit = coordinator.overUpBWLimit();
DHT dht = _util.getDHT();

View File

@ -43,7 +43,7 @@ class PeerConnectionOut implements Runnable
private boolean quit;
// Contains Messages.
private final List<Message> sendQueue = new ArrayList();
private final List<Message> sendQueue = new ArrayList<Message>();
private static final AtomicLong __id = new AtomicLong();
private final long _id;
@ -116,10 +116,10 @@ class PeerConnectionOut implements Runnable
// And remove piece messages if we are choking.
// this should get fixed for starvation
Iterator it = sendQueue.iterator();
Iterator<Message> it = sendQueue.iterator();
while (m == null && it.hasNext())
{
Message nm = (Message)it.next();
Message nm = it.next();
if (nm.type == Message.PIECE)
{
if (state.choking) {
@ -274,10 +274,10 @@ class PeerConnectionOut implements Runnable
boolean removed = false;
synchronized(sendQueue)
{
Iterator it = sendQueue.iterator();
Iterator<Message> it = sendQueue.iterator();
while (it.hasNext())
{
Message m = (Message)it.next();
Message m = it.next();
if (m.type == type)
{
it.remove();
@ -360,13 +360,13 @@ class PeerConnectionOut implements Runnable
/** retransmit requests not received in 7m */
private static final int REQ_TIMEOUT = (2 * SEND_TIMEOUT) + (60 * 1000);
void retransmitRequests(List requests)
void retransmitRequests(List<Request> requests)
{
long now = System.currentTimeMillis();
Iterator it = requests.iterator();
Iterator<Request> it = requests.iterator();
while (it.hasNext())
{
Request req = (Request)it.next();
Request req = it.next();
if(now > req.sendTime + REQ_TIMEOUT) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Retransmit request " + req + " to peer " + peer);
@ -375,12 +375,12 @@ class PeerConnectionOut implements Runnable
}
}
void sendRequests(List requests)
void sendRequests(List<Request> requests)
{
Iterator it = requests.iterator();
Iterator<Request> it = requests.iterator();
while (it.hasNext())
{
Request req = (Request)it.next();
Request req = it.next();
sendRequest(req);
}
}
@ -391,10 +391,10 @@ class PeerConnectionOut implements Runnable
// (multiple choke/unchokes received cause duplicate requests in the queue)
synchronized(sendQueue)
{
Iterator it = sendQueue.iterator();
Iterator<Message> it = sendQueue.iterator();
while (it.hasNext())
{
Message m = (Message)it.next();
Message m = it.next();
if (m.type == Message.REQUEST && m.piece == req.getPiece() &&
m.begin == req.off && m.length == req.len)
{
@ -419,10 +419,10 @@ class PeerConnectionOut implements Runnable
int total = 0;
synchronized(sendQueue)
{
Iterator it = sendQueue.iterator();
Iterator<Message> it = sendQueue.iterator();
while (it.hasNext())
{
Message m = (Message)it.next();
Message m = it.next();
if (m.type == Message.PIECE)
total += m.length;
}
@ -489,10 +489,10 @@ class PeerConnectionOut implements Runnable
// See if it is still in our send queue
synchronized(sendQueue)
{
Iterator it = sendQueue.iterator();
Iterator<Message> it = sendQueue.iterator();
while (it.hasNext())
{
Message m = (Message)it.next();
Message m = it.next();
if (m.type == Message.REQUEST
&& m.piece == req.getPiece()
&& m.begin == req.off
@ -530,10 +530,10 @@ class PeerConnectionOut implements Runnable
{
synchronized (sendQueue)
{
Iterator it = sendQueue.iterator();
Iterator<Message> it = sendQueue.iterator();
while (it.hasNext())
{
Message m = (Message)it.next();
Message m = it.next();
if (m.type == Message.PIECE
&& m.piece == piece
&& m.begin == begin
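
Typing the List parameters of retransmitRequests and sendRequests as List<Request> moves the element-type check to the call site and removes the casts inside the loops. A condensed, hypothetical version of the second method; Request and sendRequest here are stand-ins, not the real snark classes:

import java.util.Iterator;
import java.util.List;

class RequestQueue {
    static class Request { int piece; }

    private void sendRequest(Request req) {
        // ...queue the REQUEST message for this piece...
    }

    void sendRequests(List<Request> requests) {
        Iterator<Request> it = requests.iterator();
        while (it.hasNext()) {
            Request req = it.next();   // no cast needed with the typed parameter
            sendRequest(req);
        }
    }
}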

View File

@ -151,12 +151,12 @@ class PeerCoordinator implements PeerListener
this.listener = listener;
this.snark = torrent;
wantedPieces = new ArrayList();
wantedPieces = new ArrayList<Piece>();
setWantedPieces();
partialPieces = new ArrayList(getMaxConnections() + 1);
peers = new LinkedBlockingQueue();
partialPieces = new ArrayList<PartialPiece>(getMaxConnections() + 1);
peers = new LinkedBlockingQueue<Peer>();
magnetState = new MagnetState(infohash, metainfo);
pexPeers = new ConcurrentHashSet();
pexPeers = new ConcurrentHashSet<PeerID>();
// Install a timer to check the uploaders.
// Randomize the first start time so multiple tasks are spread out,
@ -218,7 +218,7 @@ class PeerCoordinator implements PeerListener
/** for web page detailed stats */
public List<Peer> peerList()
{
return new ArrayList(peers);
return new ArrayList<Peer>(peers);
}
public byte[] getID()
@ -412,7 +412,7 @@ class PeerCoordinator implements PeerListener
public void halt()
{
halted = true;
List<Peer> removed = new ArrayList();
List<Peer> removed = new ArrayList<Peer>();
synchronized(peers)
{
// Stop peer checker task.
@ -613,7 +613,7 @@ class PeerCoordinator implements PeerListener
// linked list will contain all interested peers that we choke.
// At the start are the peers that have us unchoked at the end the
// other peer that are interested, but are choking us.
List<Peer> interested = new LinkedList();
List<Peer> interested = new LinkedList<Peer>();
int count = 0;
int unchokedCount = 0;
int maxUploaders = allowedUploaders();
@ -729,7 +729,7 @@ class PeerCoordinator implements PeerListener
}
Piece piece = null;
List<Piece> requested = new ArrayList();
List<Piece> requested = new ArrayList<Piece>();
int wantedSize = END_GAME_THRESHOLD + 1;
synchronized(wantedPieces)
{
@ -833,7 +833,7 @@ class PeerCoordinator implements PeerListener
_log.debug("Updated piece priorities called but no priorities to set?");
return;
}
List<Piece> toCancel = new ArrayList();
List<Piece> toCancel = new ArrayList<Piece>();
synchronized(wantedPieces) {
// Add incomplete and previously unwanted pieces to the list
// Temp to avoid O(n**2)
@ -1019,7 +1019,7 @@ class PeerCoordinator implements PeerListener
// Announce to the world we have it!
// Disconnect from other seeders when we get the last piece
List<Peer> toDisconnect = done ? new ArrayList() : null;
List<Peer> toDisconnect = done ? new ArrayList<Peer>() : null;
for (Peer p : peers) {
if (p.isConnected())
{

View File

@ -16,7 +16,7 @@ class PeerCoordinatorSet implements Iterable<PeerCoordinator> {
private final Map<SHA1Hash, PeerCoordinator> _coordinators;
public PeerCoordinatorSet() {
_coordinators = new ConcurrentHashMap();
_coordinators = new ConcurrentHashMap<SHA1Hash, PeerCoordinator>();
}
public Iterator<PeerCoordinator> iterator() {

View File

@ -24,7 +24,6 @@ import java.io.IOException;
import java.net.UnknownHostException;
import java.util.Map;
import net.i2p.I2PAppContext;
import net.i2p.data.Base32;
import net.i2p.data.Base64;
import net.i2p.data.DataHelper;

View File

@ -56,7 +56,7 @@ class PeerState implements DataLoader
final PeerConnectionOut out;
// Outstanding request
private final List<Request> outstandingRequests = new ArrayList();
private final List<Request> outstandingRequests = new ArrayList<Request>();
/** the tail (NOT the head) of the request queue */
private Request lastRequest = null;
@ -451,7 +451,7 @@ class PeerState implements DataLoader
synchronized List<Request> returnPartialPieces()
{
Set<Integer> pcs = getRequestedPieces();
List<Request> rv = new ArrayList(pcs.size());
List<Request> rv = new ArrayList<Request>(pcs.size());
for (Integer p : pcs) {
Request req = getLowestOutstandingRequest(p.intValue());
if (req != null) {
@ -469,7 +469,7 @@ class PeerState implements DataLoader
* @return all pieces we are currently requesting, or empty Set
*/
synchronized private Set<Integer> getRequestedPieces() {
Set<Integer> rv = new HashSet(outstandingRequests.size() + 1);
Set<Integer> rv = new HashSet<Integer>(outstandingRequests.size() + 1);
for (Request req : outstandingRequests) {
rv.add(Integer.valueOf(req.getPiece()));
if (pendingRequest != null)

View File

@ -18,7 +18,7 @@ class Piece implements Comparable {
public Piece(int id) {
this.id = id;
this.peers = new HashSet(I2PSnarkUtil.MAX_CONNECTIONS / 2);
this.peers = new HashSet<PeerID>(I2PSnarkUtil.MAX_CONNECTIONS / 2);
// defer creating requests to save memory
}
@ -82,7 +82,7 @@ class Piece implements Comparable {
public void setRequested(Peer peer, boolean requested) {
if (requested) {
if (this.requests == null)
this.requests = new HashSet(2);
this.requests = new HashSet<PeerID>(2);
this.requests.add(peer.getPeerID());
} else {
if (this.requests != null)

View File

@ -25,7 +25,6 @@ import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Random;
@ -34,7 +33,6 @@ import java.util.StringTokenizer;
import net.i2p.I2PAppContext;
import net.i2p.client.streaming.I2PServerSocket;
import net.i2p.data.Destination;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
/**
@ -762,7 +760,7 @@ public class Snark
PeerCoordinator coord = coordinator;
if (coord != null)
return coord.peerList();
return Collections.EMPTY_LIST;
return Collections.emptyList();
}
/**

View File

@ -35,7 +35,6 @@ import net.i2p.util.Log;
import net.i2p.util.OrderedProperties;
import net.i2p.util.SecureDirectory;
import net.i2p.util.SecureFileOutputStream;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
import net.i2p.util.SimpleTimer2;
@ -148,20 +147,20 @@ public class SnarkManager implements CompleteListener {
* @since 0.9.6
*/
public SnarkManager(I2PAppContext ctx, String ctxPath, String ctxName) {
_snarks = new ConcurrentHashMap();
_magnets = new ConcurrentHashSet();
_snarks = new ConcurrentHashMap<String, Snark>();
_magnets = new ConcurrentHashSet<String>();
_addSnarkLock = new Object();
_context = ctx;
_contextPath = ctxPath;
_contextName = ctxName;
_log = _context.logManager().getLog(SnarkManager.class);
_messages = new LinkedBlockingQueue();
_messages = new LinkedBlockingQueue<String>();
_util = new I2PSnarkUtil(_context, ctxName);
String cfile = ctxName + CONFIG_FILE_SUFFIX;
_configFile = new File(cfile);
if (!_configFile.isAbsolute())
_configFile = new File(_context.getConfigDir(), cfile);
_trackerMap = new ConcurrentHashMap(4);
_trackerMap = new ConcurrentHashMap<String, Tracker>(4);
loadConfig(null);
}
@ -240,8 +239,8 @@ public class SnarkManager implements CompleteListener {
/** newest last */
public List<String> getMessages() {
if (_messages.isEmpty())
return Collections.EMPTY_LIST;
return new ArrayList(_messages);
return Collections.emptyList();
return new ArrayList<String>(_messages);
}
/** @since 0.9 */
@ -595,7 +594,7 @@ public class SnarkManager implements CompleteListener {
try { port = Integer.parseInt(i2cpPort); } catch (NumberFormatException nfe) {}
}
Map<String, String> opts = new HashMap();
Map<String, String> opts = new HashMap<String, String>();
if (i2cpOpts == null) i2cpOpts = "";
StringTokenizer tok = new StringTokenizer(i2cpOpts, " \t\n");
while (tok.hasMoreTokens()) {
@ -604,7 +603,7 @@ public class SnarkManager implements CompleteListener {
if (split > 0)
opts.put(pair.substring(0, split), pair.substring(split+1));
}
Map<String, String> oldOpts = new HashMap();
Map<String, String> oldOpts = new HashMap<String, String>();
String oldI2CPOpts = _config.getProperty(PROP_I2CP_OPTS);
if (oldI2CPOpts == null) oldI2CPOpts = "";
tok = new StringTokenizer(oldI2CPOpts, " \t\n");
@ -737,7 +736,7 @@ public class SnarkManager implements CompleteListener {
*/
private List<String> getOpenTrackers() {
if (!_util.shouldUseOpenTrackers())
return Collections.EMPTY_LIST;
return Collections.emptyList();
return getListConfig(PROP_OPENTRACKERS, I2PSnarkUtil.DEFAULT_OPENTRACKERS);
}
@ -782,7 +781,7 @@ public class SnarkManager implements CompleteListener {
if (val == null)
val = dflt;
if (val == null)
return Collections.EMPTY_LIST;
return Collections.emptyList();
return Arrays.asList(val.split(","));
}
@ -833,7 +832,7 @@ public class SnarkManager implements CompleteListener {
* An unsynchronized copy.
*/
public Set<String> listTorrentFiles() {
return new HashSet(_snarks.keySet());
return new HashSet<String>(_snarks.keySet());
}
/**
@ -1386,7 +1385,7 @@ public class SnarkManager implements CompleteListener {
* @return failure message or null on success
*/
private String validateTorrent(MetaInfo info) {
List files = info.getFiles();
List<List<String>> files = info.getFiles();
if ( (files != null) && (files.size() > MAX_FILES_PER_TORRENT) ) {
return _("Too many files in \"{0}\" ({1}), deleting it!", info.getName(), files.size());
} else if ( (files == null) && (info.getName().endsWith(".torrent")) ) {
@ -1402,7 +1401,7 @@ public class SnarkManager implements CompleteListener {
return _("Torrent \"{0}\" has no data, deleting it!", info.getName());
} else if (info.getTotalLength() > Storage.MAX_TOTAL_SIZE) {
System.out.println("torrent info: " + info.toString());
List lengths = info.getLengths();
List<Long> lengths = info.getLengths();
if (lengths != null)
for (int i = 0; i < lengths.size(); i++)
System.out.println("File " + i + " is " + lengths.get(i) + " long.");
@ -1688,8 +1687,8 @@ public class SnarkManager implements CompleteListener {
// Don't remove magnet torrents that don't have a torrent file yet
existingNames.removeAll(_magnets);
// now lets see which ones have been removed...
for (Iterator iter = existingNames.iterator(); iter.hasNext(); ) {
String name = (String)iter.next();
for (Iterator<String> iter = existingNames.iterator(); iter.hasNext(); ) {
String name = iter.next();
if (foundNames.contains(name)) {
// known and still there. noop
} else {
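
Collections.EMPTY_LIST is a raw List, so returning it from a method declared List<String> compiles only with an unchecked warning; Collections.emptyList() infers the element type from the return target and hands back the same shared immutable instance. A small sketch of the difference, assuming a List<String> return type like getMessages() above:

import java.util.Collections;
import java.util.List;

class Messages {
    @SuppressWarnings("unchecked")
    List<String> before() {
        return Collections.EMPTY_LIST;   // raw constant: unchecked conversion
    }

    List<String> after() {
        return Collections.emptyList();  // element type inferred, no warning
    }
}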

View File

@ -75,7 +75,7 @@ public class Storage
public static final int MAX_PIECES = 10*1024;
public static final long MAX_TOTAL_SIZE = MAX_PIECE_SIZE * (long) MAX_PIECES;
private static final Map<String, String> _filterNameCache = new ConcurrentHashMap();
private static final Map<String, String> _filterNameCache = new ConcurrentHashMap<String, String>();
private static final boolean _isWindows = SystemVersion.isWindows();
@ -101,7 +101,7 @@ public class Storage
total_length = metainfo.getTotalLength();
List<List<String>> files = metainfo.getFiles();
int sz = files != null ? files.size() : 1;
_torrentFiles = new ArrayList(sz);
_torrentFiles = new ArrayList<TorrentFile>(sz);
}
/**
@ -127,7 +127,7 @@ public class Storage
_torrentFiles = getFiles(baseFile);
long total = 0;
ArrayList<Long> lengthsList = new ArrayList();
ArrayList<Long> lengthsList = new ArrayList<Long>();
for (TorrentFile tf : _torrentFiles)
{
long length = tf.length;
@ -160,10 +160,10 @@ public class Storage
bitfield = new BitField(pieces);
needed = 0;
List<List<String>> files = new ArrayList();
List<List<String>> files = new ArrayList<List<String>>();
for (TorrentFile tf : _torrentFiles)
{
List<String> file = new ArrayList();
List<String> file = new ArrayList<String>();
StringTokenizer st = new StringTokenizer(tf.name, File.separator);
while (st.hasMoreTokens())
{
@ -218,11 +218,11 @@ public class Storage
{
if (base.getAbsolutePath().equals("/"))
throw new IOException("Don't seed root");
List<File> files = new ArrayList();
List<File> files = new ArrayList<File>();
addFiles(files, base);
int size = files.size();
List<TorrentFile> rv = new ArrayList(size);
List<TorrentFile> rv = new ArrayList<TorrentFile>(size);
for (File f : files) {
rv.add(new TorrentFile(base, f));
@ -550,7 +550,7 @@ public class Storage
if (f.equals(_torrentFiles.get(j).RAFfile)) {
// Rename and start the check over again
// Copy path since metainfo list is unmodifiable
path = new ArrayList(path);
path = new ArrayList<String>(path);
int last = path.size() - 1;
String lastPath = path.get(last);
int dot = lastPath.lastIndexOf('.');

View File

@ -21,8 +21,6 @@
package org.klomp.snark;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
@ -38,7 +36,6 @@ import java.util.Locale;
import java.util.Random;
import java.util.Set;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.ConvertToHash;
@ -139,8 +136,8 @@ public class TrackerClient implements Runnable {
this.port = PORT; //(port == -1) ? 9 : port;
this.infoHash = urlencode(snark.getInfoHash());
this.peerID = urlencode(snark.getID());
this.trackers = new ArrayList(2);
this.backupTrackers = new ArrayList(2);
this.trackers = new ArrayList<TCTracker>(2);
this.backupTrackers = new ArrayList<TCTracker>(2);
}
public synchronized void start() {
@ -274,7 +271,7 @@ public class TrackerClient implements Runnable {
primary = meta.getAnnounce();
else if (additionalTrackerURL != null)
primary = additionalTrackerURL;
Set<Hash> trackerHashes = new HashSet(8);
Set<Hash> trackerHashes = new HashSet<Hash>(8);
// primary tracker
if (primary != null) {
@ -533,7 +530,7 @@ public class TrackerClient implements Runnable {
if (coordinator.needOutboundPeers()) {
// we only want to talk to new people if we need things
// from them (duh)
List<Peer> ordered = new ArrayList(peers);
List<Peer> ordered = new ArrayList<Peer>(peers);
Random r = _util.getContext().random();
Collections.shuffle(ordered, r);
Iterator<Peer> it = ordered.iterator();
@ -598,7 +595,7 @@ public class TrackerClient implements Runnable {
if (!pids.isEmpty()) {
if (_log.shouldLog(Log.INFO))
_log.info("Got " + pids.size() + " from PEX");
List<Peer> peers = new ArrayList(pids.size());
List<Peer> peers = new ArrayList<Peer>(pids.size());
for (PeerID pID : pids) {
peers.add(new Peer(pID, snark.getID(), snark.getInfoHash(), snark.getMetaInfo()));
}
@ -652,7 +649,7 @@ public class TrackerClient implements Runnable {
// now try these peers
if ((!stop) && !hashes.isEmpty()) {
List<Peer> peers = new ArrayList(hashes.size());
List<Peer> peers = new ArrayList<Peer>(hashes.size());
for (Hash h : hashes) {
try {
PeerID pID = new PeerID(h.getData(), _util);

View File

@ -59,7 +59,7 @@ class TrackerInfo
this(be.bdecodeMap().getMap(), my_id, infohash, metainfo, util);
}
private TrackerInfo(Map m, byte[] my_id, byte[] infohash, MetaInfo metainfo, I2PSnarkUtil util)
private TrackerInfo(Map<String, BEValue> m, byte[] my_id, byte[] infohash, MetaInfo metainfo, I2PSnarkUtil util)
throws IOException
{
BEValue reason = (BEValue)m.get("failure reason");
@ -80,7 +80,7 @@ class TrackerInfo
BEValue bePeers = (BEValue)m.get("peers");
if (bePeers == null) {
peers = Collections.EMPTY_SET;
peers = Collections.emptySet();
} else {
Set<Peer> p;
try {
@ -127,7 +127,7 @@ class TrackerInfo
private static Set<Peer> getPeers(List<BEValue> l, byte[] my_id, byte[] infohash, MetaInfo metainfo, I2PSnarkUtil util)
throws IOException
{
Set<Peer> peers = new HashSet(l.size());
Set<Peer> peers = new HashSet<Peer>(l.size());
for (BEValue bev : l) {
PeerID peerID;
@ -161,7 +161,7 @@ class TrackerInfo
throws IOException
{
int count = l.length / HASH_LENGTH;
Set<Peer> peers = new HashSet(count);
Set<Peer> peers = new HashSet<Peer>(count);
for (int i = 0; i < count; i++) {
PeerID peerID;
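
Declaring the decoded tracker response as Map<String, BEValue> rather than a raw Map documents what the bdecoder actually produces and makes the (BEValue) casts on m.get(...) redundant, although this commit leaves the existing casts in place. A hedged sketch with a minimal stand-in for BEValue:

import java.util.HashMap;
import java.util.Map;

class BencodedResponse {
    // Stand-in for org.klomp.snark.bencode.BEValue, not the real class.
    static class BEValue {
        private final Object value;
        BEValue(Object value) { this.value = value; }
        String getString() { return String.valueOf(value); }
    }

    static String failureReason(Map<String, BEValue> m) {
        BEValue reason = m.get("failure reason");   // no cast with the typed map
        return reason == null ? null : reason.getString();
    }

    public static void main(String[] args) {
        Map<String, BEValue> m = new HashMap<String, BEValue>();
        m.put("failure reason", new BEValue("torrent not registered"));
        System.out.println(failureReason(m));
    }
}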

View File

@ -6,11 +6,9 @@ import java.util.List;
import net.i2p.I2PAppContext;
import net.i2p.crypto.TrustedUpdate;
import net.i2p.data.DataHelper;
import net.i2p.update.*;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer2;
import net.i2p.util.VersionComparator;
/**
* The downloader for router signed updates.

View File

@ -5,11 +5,8 @@ package org.klomp.snark.dht;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.I2PAppContext;
@ -53,8 +50,8 @@ class DHTNodes {
_context = ctx;
_expireTime = MAX_EXPIRE_TIME;
_log = _context.logManager().getLog(DHTNodes.class);
_nodeMap = new ConcurrentHashMap();
_kad = new KBucketSet(ctx, me, KAD_K, KAD_B, new KBTrimmer(ctx, KAD_K));
_nodeMap = new ConcurrentHashMap<NID, NodeInfo>();
_kad = new KBucketSet<NID>(ctx, me, KAD_K, KAD_B, new KBTrimmer(ctx, KAD_K));
}
public void start() {
@ -120,7 +117,7 @@ class DHTNodes {
else
key = new NID(h.getData());
List<NID> keys = _kad.getClosest(key, numWant);
List<NodeInfo> rv = new ArrayList(keys.size());
List<NodeInfo> rv = new ArrayList<NodeInfo>(keys.size());
for (NID nid : keys) {
NodeInfo ninfo = _nodeMap.get(nid);
if (ninfo != null)

View File

@ -104,10 +104,10 @@ class DHTTracker {
List<Hash> getPeers(InfoHash ih, int max) {
Peers peers = _torrents.get(ih);
if (peers == null)
return Collections.EMPTY_LIST;
return Collections.emptyList();
int size = peers.size();
List<Hash> rv = new ArrayList(peers.values());
List<Hash> rv = new ArrayList<Hash>(peers.values());
if (max < size) {
Collections.shuffle(rv, _context.random());
rv = rv.subList(0, max);

View File

@ -10,7 +10,6 @@ import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@ -24,7 +23,6 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import net.i2p.I2PAppContext;
import net.i2p.client.I2PClient;
import net.i2p.client.I2PSession;
import net.i2p.client.I2PSessionException;
import net.i2p.client.I2PSessionMuxedListener;
@ -32,12 +30,10 @@ import net.i2p.client.SendMessageOptions;
import net.i2p.client.datagram.I2PDatagramDissector;
import net.i2p.client.datagram.I2PDatagramMaker;
import net.i2p.client.datagram.I2PInvalidDatagramException;
import net.i2p.crypto.SHA1Hash;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.SimpleDataStructure;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.I2PAppThread;
import net.i2p.util.Log;
@ -169,10 +165,10 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
_log = ctx.logManager().getLog(KRPC.class);
_tracker = new DHTTracker(ctx);
_sentQueries = new ConcurrentHashMap();
_outgoingTokens = new ConcurrentHashMap();
_incomingTokens = new ConcurrentHashMap();
_blacklist = new ConcurrentHashSet();
_sentQueries = new ConcurrentHashMap<MsgID, ReplyWaiter>();
_outgoingTokens = new ConcurrentHashMap<Token, NodeInfo>();
_incomingTokens = new ConcurrentHashMap<NID, Token>();
_blacklist = new ConcurrentHashSet<NID>();
// Construct my NodeInfo
// Pick ports over a big range to marginally increase security
@ -246,9 +242,9 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
_log.info("DHT is empty, cannot explore");
return;
}
SortedSet<NodeInfo> toTry = new TreeSet(new NodeInfoComparator(target));
SortedSet<NodeInfo> toTry = new TreeSet<NodeInfo>(new NodeInfoComparator(target));
toTry.addAll(nodes);
Set<NodeInfo> tried = new HashSet();
Set<NodeInfo> tried = new HashSet<NodeInfo>();
if (_log.shouldLog(Log.INFO))
_log.info("Starting explore of " + target);
@ -328,7 +324,7 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
rv.remove(_myNodeInfo.getHash());
if (rv.size() >= max)
return rv;
rv = new HashSet(rv);
rv = new HashSet<Hash>(rv);
long endTime = _context.clock().now() + maxWait;
// needs to be much higher than log(size) since many lookups will fail
@ -337,10 +333,10 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
// Initial set to try, will get added to as we go
List<NodeInfo> nodes = _knownNodes.findClosest(iHash, maxNodes);
NodeInfoComparator comp = new NodeInfoComparator(iHash);
SortedSet<NodeInfo> toTry = new TreeSet(comp);
SortedSet<NodeInfo> heardFrom = new TreeSet(comp);
SortedSet<NodeInfo> toTry = new TreeSet<NodeInfo>(comp);
SortedSet<NodeInfo> heardFrom = new TreeSet<NodeInfo>(comp);
toTry.addAll(nodes);
SortedSet<NodeInfo> tried = new TreeSet(comp);
SortedSet<NodeInfo> tried = new TreeSet<NodeInfo>(comp);
if (_log.shouldLog(Log.INFO))
_log.info("Starting getPeers for " + iHash + " (b64: " + new NID(ih) + ") " + " with " + nodes.size() + " to try");
@ -697,9 +693,9 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
private ReplyWaiter sendPing(NodeInfo nInfo) {
if (_log.shouldLog(Log.INFO))
_log.info("Sending ping to: " + nInfo);
Map<String, Object> map = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
map.put("q", "ping");
Map<String, Object> args = new HashMap();
Map<String, Object> args = new HashMap<String, Object>();
map.put("a", args);
return sendQuery(nInfo, map, true);
}
@ -714,9 +710,9 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
private ReplyWaiter sendFindNode(NodeInfo nInfo, NID tID) {
if (_log.shouldLog(Log.INFO))
_log.info("Sending find node of " + tID + " to: " + nInfo);
Map<String, Object> map = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
map.put("q", "find_node");
Map<String, Object> args = new HashMap();
Map<String, Object> args = new HashMap<String, Object>();
args.put("target", tID.getData());
map.put("a", args);
return sendQuery(nInfo, map, true);
@ -731,9 +727,9 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
private ReplyWaiter sendGetPeers(NodeInfo nInfo, InfoHash ih) {
if (_log.shouldLog(Log.INFO))
_log.info("Sending get peers of " + ih + " to: " + nInfo);
Map<String, Object> map = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
map.put("q", "get_peers");
Map<String, Object> args = new HashMap();
Map<String, Object> args = new HashMap<String, Object>();
args.put("info_hash", ih.getData());
map.put("a", args);
ReplyWaiter rv = sendQuery(nInfo, map, true);
@ -752,9 +748,9 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
private ReplyWaiter sendAnnouncePeer(NodeInfo nInfo, InfoHash ih, Token token) {
if (_log.shouldLog(Log.INFO))
_log.info("Sending announce of " + ih + " to: " + nInfo);
Map<String, Object> map = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
map.put("q", "announce_peer");
Map<String, Object> args = new HashMap();
Map<String, Object> args = new HashMap<String, Object>();
args.put("info_hash", ih.getData());
// port ignored
args.put("port", Integer.valueOf(TrackerClient.PORT));
@ -775,8 +771,8 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
private boolean sendPong(NodeInfo nInfo, MsgID msgID) {
if (_log.shouldLog(Log.INFO))
_log.info("Sending pong to: " + nInfo);
Map<String, Object> map = new HashMap();
Map<String, Object> resps = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
Map<String, Object> resps = new HashMap<String, Object>();
map.put("r", resps);
return sendResponse(nInfo, msgID, map);
}
@ -794,8 +790,8 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
private boolean sendNodes(NodeInfo nInfo, MsgID msgID, Token token, byte[] ids) {
if (_log.shouldLog(Log.INFO))
_log.info("Sending nodes to: " + nInfo);
Map<String, Object> map = new HashMap();
Map<String, Object> resps = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
Map<String, Object> resps = new HashMap<String, Object>();
map.put("r", resps);
if (token != null)
resps.put("token", token.getData());
@ -807,8 +803,8 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
private boolean sendPeers(NodeInfo nInfo, MsgID msgID, Token token, List<byte[]> peers) {
if (_log.shouldLog(Log.INFO))
_log.info("Sending peers to: " + nInfo);
Map<String, Object> map = new HashMap();
Map<String, Object> resps = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
Map<String, Object> resps = new HashMap<String, Object>();
map.put("r", resps);
resps.put("token", token.getData());
resps.put("values", peers);
@ -824,8 +820,8 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
private boolean sendError(NodeInfo nInfo, MsgID msgID, int err, String msg) {
if (_log.shouldLog(Log.INFO))
_log.info("Sending error " + msg + " to: " + nInfo);
Map<String, Object> map = new HashMap();
Map<String, Object> resps = new HashMap();
Map<String, Object> map = new HashMap<String, Object>();
Map<String, Object> resps = new HashMap<String, Object>();
map.put("r", resps);
return sendResponse(nInfo, msgID, map);
}
@ -1260,7 +1256,7 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
}
sendNodes(nInfo, msgID, token, nodeArray);
} else {
List<byte[]> hashes = new ArrayList(peers.size());
List<byte[]> hashes = new ArrayList<byte[]>(peers.size());
for (Hash peer : peers) {
hashes.add(peer.getData());
}
@ -1346,7 +1342,7 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
*/
private List<NodeInfo> receiveNodes(NodeInfo nInfo, byte[] ids) throws InvalidBEncodingException {
int max = Math.min(K, ids.length / NodeInfo.LENGTH);
List<NodeInfo> rv = new ArrayList(max);
List<NodeInfo> rv = new ArrayList<NodeInfo>(max);
for (int off = 0; off < ids.length && rv.size() < max; off += NodeInfo.LENGTH) {
NodeInfo nInf = new NodeInfo(ids, off);
if (_blacklist.contains(nInf.getNID())) {
@ -1370,7 +1366,7 @@ public class KRPC implements I2PSessionMuxedListener, DHT {
if (_log.shouldLog(Log.INFO))
_log.info("Rcvd peers from: " + nInfo);
int max = Math.min(MAX_WANT, peers.size());
List<Hash> rv = new ArrayList(max);
List<Hash> rv = new ArrayList<Hash>(max);
for (BEValue bev : peers) {
byte[] b = bev.getBytes();
//Hash h = new Hash(b);
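
Giving the TreeSets their element type, as in new TreeSet<NodeInfo>(comp) above, also forces the comparator to be a Comparator over that element type; the raw versions accepted any comparator and deferred a mismatch to a runtime ClassCastException. A self-contained sketch using a hypothetical Node in place of NodeInfo, with an anonymous class in the pre-Java-8 style of this codebase:

import java.util.Comparator;
import java.util.SortedSet;
import java.util.TreeSet;

class ClosestFirst {
    static class Node {
        final int distance;
        Node(int distance) { this.distance = distance; }
    }

    static SortedSet<Node> newClosestFirstSet() {
        Comparator<Node> byDistance = new Comparator<Node>() {
            public int compare(Node l, Node r) {
                // closest (smallest distance) first, overflow-safe
                return l.distance < r.distance ? -1 : (l.distance == r.distance ? 0 : 1);
            }
        };
        return new TreeSet<Node>(byDistance);
    }

    public static void main(String[] args) {
        SortedSet<Node> set = newClosestFirstSet();
        set.add(new Node(5));
        set.add(new Node(1));
        System.out.println(set.first().distance);   // prints 1
    }
}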

View File

@ -7,7 +7,6 @@ import java.util.Date;
import net.i2p.I2PAppContext;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
/**
* Used for Both outgoing and incoming tokens