forked from I2P_Developers/i2p.i2p
* Console: Use capability method for counting integrated peers
* Netdb: Speed up collection of all the floodfill peers by using PeerManager.getPeersByCapability()
* PeerManager: Capability cleanups
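For orientation before the diff: the common thread in these changes is a capability index in PeerManager — a map keyed by capability character ('f' = floodfill, 'U' = unreachable, ...) holding the set of peers that advertise it — so counting or collecting floodfill peers becomes one set lookup instead of a scan over the whole netDb or profile pool. Below is a minimal, illustrative sketch of that pattern only; CapabilityIndex and the String peer ids are invented for the example, while the real code lives in PeerManager and uses Hash and ConcurrentHashSet.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of a capability index: peers keyed by capability character.
class CapabilityIndex {
    private final Map<Character, Set<String>> peersByCapability = new HashMap<>();

    CapabilityIndex(String trackedCaps) {
        // one concurrent set per tracked capability, e.g. 'f' for floodfill
        for (int i = 0; i < trackedCaps.length(); i++)
            peersByCapability.put(Character.toLowerCase(trackedCaps.charAt(i)),
                                  ConcurrentHashMap.newKeySet());
    }

    // Record a peer's published capability string; case is ignored,
    // untracked capabilities are simply skipped.
    void setCapabilities(String peer, String caps) {
        for (int i = 0; i < caps.length(); i++) {
            Set<String> peers = peersByCapability.get(Character.toLowerCase(caps.charAt(i)));
            if (peers != null)
                peers.add(peer);
        }
    }

    // Returns a non-null unmodifiable set of peers advertising the capability.
    Set<String> getPeersByCapability(char capability) {
        Set<String> peers = peersByCapability.get(Character.toLowerCase(capability));
        return (peers != null) ? Collections.unmodifiableSet(peers) : Collections.emptySet();
    }
}

With an index like this, the console's integrated-peer count reduces to getPeersByCapability('f').size(), which is essentially what SummaryHelper switches to in the diff below.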
@@ -183,7 +183,8 @@ class ProfileOrganizerRenderer {
         }
         buf.append("</table>");
 
-        buf.append("<h2><a name=\"flood\"></a>").append(_("Floodfill and Integrated Peers")).append("</h2>\n");
+        buf.append("<h2><a name=\"flood\"></a>").append(_("Floodfill and Integrated Peers"))
+           .append(" (").append(integratedPeers.size()).append(")</h2>\n");
         buf.append("<table>");
         buf.append("<tr>");
         buf.append("<th class=\"smallhead\">").append(_("Peer")).append("</th>");
@@ -221,8 +221,8 @@ public class SummaryHelper extends HelperBase {
     public int getWellIntegratedPeers() {
         if (_context == null)
             return 0;
-        else
-            return _context.profileOrganizer().countWellIntegratedPeers();
+        //return _context.profileOrganizer().countWellIntegratedPeers();
+        return _context.peerManager().getPeersByCapability(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL).size();
     }
     /**
      * How many peers the router ranks as failing.
@@ -317,7 +317,7 @@ public class RouterInfo extends DatabaseEntry {
 
     /**
      * what special capabilities this router offers
-     *
+     * @return non-null, empty string if none
      */
     public String getCapabilities() {
         if (_options == null) return "";
history.txt
@@ -1,3 +1,15 @@
+2011-07-21 zzz
+ * Atalk: Remove this terrible example
+ * Console: Use capability method for counting integrated peers
+ * i2psnark: Don't let connection problem crash the DirMonitor (ticket #495)
+ * Netdb: Speed up collection of all the floodfill peers by using
+   PeerManager.getPeersByCapability()
+ * PeerManager: Capability cleanups
+ * Tunnel TestJob:
+   - Don't continue testing after the pool is dead
+   - Tweak test intervals
+ * TunnelPool: Make more methods package private
+
 2011-07-18 zzz
  * FileUtil: Add a rename method and a new copy method
  * I2PTunnel: Rename privkey file when deleting tunnel to prevent inadvertent reuse
@@ -10,6 +10,7 @@ package net.i2p.router;
 
 import java.io.Writer;
 import java.util.List;
+import java.util.Set;
 
 import net.i2p.data.Hash;
 
@@ -25,7 +26,7 @@ class DummyPeerManagerFacade implements PeerManagerFacade {
     public void restart() {}
     public void renderStatusHTML(Writer out) { }
     public List<Hash> selectPeers(PeerSelectionCriteria criteria) { return null; }
-    public List<Hash> getPeersByCapability(char capability) { return null; }
+    public Set<Hash> getPeersByCapability(char capability) { return null; }
     public void setCapabilities(Hash peer, String caps) {}
     public void removeCapabilities(Hash peer) {}
     public Hash selectRandomByCapability(char capability) { return null; }
@@ -9,6 +9,7 @@ package net.i2p.router;
  */
 
 import java.util.List;
+import java.util.Set;
 
 import net.i2p.data.Hash;
 
@@ -26,7 +27,7 @@ public interface PeerManagerFacade extends Service {
      * @return List of Hash objects of the RouterIdentity for matching peers
      */
     public List<Hash> selectPeers(PeerSelectionCriteria criteria);
-    public List<Hash> getPeersByCapability(char capability);
+    public Set<Hash> getPeersByCapability(char capability);
     public void setCapabilities(Hash peer, String caps);
     public void removeCapabilities(Hash peer);
     public Hash selectRandomByCapability(char capability);
@@ -551,7 +551,7 @@ public class Router implements RouterClock.ClockShiftListener {
         // if prop set to true, don't tell people we are ff even if we are
         if (FloodfillNetworkDatabaseFacade.floodfillEnabled(_context) &&
             !Boolean.valueOf(_context.getProperty("router.hideFloodfillParticipant")).booleanValue())
-            ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
+            ri.addCapability(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL);
 
         if(Boolean.valueOf(_context.getProperty(PROP_HIDDEN)).booleanValue())
             ri.addCapability(RouterInfo.CAPABILITY_HIDDEN);
@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 12;
+    public final static long BUILD = 13;
 
     /** for example "-test" */
     public final static String EXTRA = "";
@@ -32,7 +32,7 @@ import net.i2p.util.Log;
  *
  */
 public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
-    public static final char CAPACITY_FLOODFILL = 'f';
+    public static final char CAPABILITY_FLOODFILL = 'f';
     private final Map _activeFloodQueries;
     private boolean _floodfillEnabled;
     /** for testing, see isFloodfill() below */
@@ -223,10 +223,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade
             } catch (DataFormatException dfe) {}
         }
         String caps = peer.getCapabilities();
-        if ( (caps != null) && (caps.indexOf(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL) != -1) )
-            return true;
-        else
-            return false;
+        return caps.indexOf(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL) >= 0;
     }
 
     public List<RouterInfo> getKnownRouterData() {
@@ -107,12 +107,23 @@ class FloodfillPeerSelector extends PeerSelector {
      * List is not sorted and not shuffled.
      */
     private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet kbuckets) {
+      /*****
        if (kbuckets == null) return Collections.EMPTY_LIST;
        // TODO this is very slow - use profile getPeersByCapability('f') instead
        _context.statManager().addRateData("netDb.newFSC", 0, 0);
        FloodfillSelectionCollector matches = new FloodfillSelectionCollector(null, toIgnore, 0);
        kbuckets.getAll(matches);
        return matches.getFloodfillParticipants();
+      *****/
+        Set<Hash> set = _context.peerManager().getPeersByCapability(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL);
+        List<Hash> rv = new ArrayList(set.size());
+        for (Hash h : set) {
+            if ((toIgnore != null && toIgnore.contains(h)) ||
+                _context.shitlist().isShitlistedForever(h))
+                continue;
+            rv.add(h);
+        }
+        return rv;
     }
 
     /**
@@ -251,12 +262,12 @@ class FloodfillPeerSelector extends PeerSelector {
     }
 
     private class FloodfillSelectionCollector implements SelectionCollector {
-        private TreeSet<Hash> _sorted;
-        private List<Hash> _floodfillMatches;
-        private Hash _key;
-        private Set<Hash> _toIgnore;
+        private final TreeSet<Hash> _sorted;
+        private final List<Hash> _floodfillMatches;
+        private final Hash _key;
+        private final Set<Hash> _toIgnore;
         private int _matches;
-        private int _wanted;
+        private final int _wanted;
 
         /**
          * Warning - may return our router hash - add to toIgnore if necessary
@@ -267,7 +278,6 @@ class FloodfillPeerSelector extends PeerSelector {
             _sorted = new TreeSet(new XORComparator(key));
             _floodfillMatches = new ArrayList(8);
             _toIgnore = toIgnore;
-            _matches = 0;
             _wanted = wanted;
         }
 
@@ -11,8 +11,9 @@ package net.i2p.router.peermanager;
 import java.io.IOException;
 import java.io.Writer;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -21,6 +22,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import net.i2p.data.Hash;
 import net.i2p.data.RouterInfo;
 import net.i2p.router.PeerSelectionCriteria;
+import net.i2p.router.Router;
 import net.i2p.router.RouterContext;
 import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
 import net.i2p.util.ConcurrentHashSet;
@@ -30,15 +32,7 @@ import net.i2p.util.SimpleTimer2;
 /**
  * Manage the current state of the statistics
  *
- * All the capabilities methods appear to be almost unused -
- * TunnelPeerSelector just looks for unreachables, and that's it?
- * If so, a lot of this can go away, including the array of 26 ArrayLists,
- * and a lot of synchronization on _capabilitiesByPeer.
- *
- * We don't trust any published capabilities except for 'K' and 'U'.
- * This should be cleaned up.
- *
- * setCapabilities() and removeCapabilities() can just add/remove the profile and that's it.
+ * Also maintain Sets for each of the capabilities in TRACKED_CAPS.
  *
  */
 class PeerManager {
@@ -46,12 +40,24 @@ class PeerManager {
     private final RouterContext _context;
     private final ProfileOrganizer _organizer;
     private final ProfilePersistenceHelper _persistenceHelper;
-    private final Set<Hash> _peersByCapability[];
+    private final Map<Character, Set<Hash>> _peersByCapability;
+    /** value strings are lower case */
     private final Map<Hash, String> _capabilitiesByPeer;
     private static final long REORGANIZE_TIME = 45*1000;
     private static final long REORGANIZE_TIME_MEDIUM = 123*1000;
     private static final long REORGANIZE_TIME_LONG = 551*1000;
 
+    public static final String TRACKED_CAPS = "" +
+        FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL +
+        RouterInfo.CAPABILITY_HIDDEN +
+        Router.CAPABILITY_BW12 +
+        Router.CAPABILITY_BW32 +
+        Router.CAPABILITY_BW64 +
+        Router.CAPABILITY_BW128 +
+        Router.CAPABILITY_BW256 +
+        Router.CAPABILITY_REACHABLE +
+        Router.CAPABILITY_UNREACHABLE;
+
     /**
      * Profiles are now loaded in a separate thread,
      * so this should return quickly.
@@ -63,9 +69,9 @@ class PeerManager {
         _organizer = context.profileOrganizer();
         _organizer.setUs(context.routerHash());
         _capabilitiesByPeer = new ConcurrentHashMap(128);
-        _peersByCapability = new Set[26];
-        for (int i = 0; i < _peersByCapability.length; i++)
-            _peersByCapability[i] = new ConcurrentHashSet();
+        _peersByCapability = new HashMap(TRACKED_CAPS.length());
+        for (int i = 0; i < TRACKED_CAPS.length(); i++)
+            _peersByCapability.put(Character.valueOf(Character.toLowerCase(TRACKED_CAPS.charAt(i))), new ConcurrentHashSet());
         loadProfilesInBackground();
         ////_context.jobQueue().addJob(new EvaluateProfilesJob(_context));
         //SimpleScheduler.getInstance().addPeriodicEvent(new Reorg(), 0, REORGANIZE_TIME);
@@ -96,9 +102,8 @@
     }
 
     void storeProfiles() {
-        Set peers = selectPeers();
-        for (Iterator<Hash> iter = peers.iterator(); iter.hasNext(); ) {
-            Hash peer = iter.next();
+        Set<Hash> peers = selectPeers();
+        for (Hash peer : peers) {
             storeProfile(peer);
         }
     }
@@ -107,11 +112,11 @@
     void clearProfiles() {
         _organizer.clearProfiles();
         _capabilitiesByPeer.clear();
-        for (int i = 0; i < _peersByCapability.length; i++)
-            _peersByCapability[i].clear();
+        for (Set p : _peersByCapability.values())
+            p.clear();
     }
 
-    Set selectPeers() {
+    Set<Hash> selectPeers() {
         return _organizer.selectAllPeers();
     }
 
@@ -152,15 +157,12 @@
      */
     void loadProfiles() {
         Set<PeerProfile> profiles = _persistenceHelper.readProfiles();
-        for (Iterator<PeerProfile> iter = profiles.iterator(); iter.hasNext();) {
-            PeerProfile prof = iter.next();
-            if (prof != null) {
+        for (PeerProfile prof : profiles) {
             _organizer.addProfile(prof);
             if (_log.shouldLog(Log.DEBUG))
                 _log.debug("Profile for " + prof.getPeer().toBase64() + " loaded");
-            }
         }
     }
 
     /**
      * Find some peers that meet the criteria and we have the netDb info for locally
@@ -207,37 +209,37 @@
             if (_log.shouldLog(Log.WARN))
                 _log.warn("We ran out of peers when looking for reachable ones after finding "
                           + "0 with "
-                          + _organizer.countWellIntegratedPeers() + "/"
                           + _organizer.countHighCapacityPeers() + "/"
-                          + _organizer.countFastPeers() + " integrated/high capacity/fast peers");
+                          + _organizer.countFastPeers() + " high capacity/fast peers");
         }
         if (_log.shouldLog(Log.INFO))
             _log.info("Peers selected: " + peers);
         return new ArrayList(peers);
     }
 
+    /**
+     * @param caps non-null, case is ignored
+     */
     public void setCapabilities(Hash peer, String caps) {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Setting capabilities for " + peer.toBase64() + " to " + caps);
-        if (caps != null) caps = caps.toLowerCase();
+        caps = caps.toLowerCase();
 
-        String oldCaps = null;
-        if (caps != null)
-            oldCaps = _capabilitiesByPeer.put(peer, caps);
-        else
-            oldCaps = _capabilitiesByPeer.remove(peer);
+        String oldCaps = _capabilitiesByPeer.put(peer, caps);
+        if (caps.equals(oldCaps))
+            return;
 
         if (oldCaps != null) {
             for (int i = 0; i < oldCaps.length(); i++) {
                 char c = oldCaps.charAt(i);
-                if ( (caps == null) || (caps.indexOf(c) < 0) ) {
+                if (caps.indexOf(c) < 0) {
                     Set<Hash> peers = locked_getPeers(c);
                     if (peers != null)
                         peers.remove(peer);
                 }
             }
         }
-        if (caps != null) {
+
             for (int i = 0; i < caps.length(); i++) {
                 char c = caps.charAt(i);
                 if ( (oldCaps != null) && (oldCaps.indexOf(c) >= 0) )
@@ -247,25 +249,18 @@
                 peers.add(peer);
             }
-        }
     }
 
     /** locking no longer req'd */
     private Set<Hash> locked_getPeers(char c) {
         c = Character.toLowerCase(c);
-        int i = c - 'a';
-        if ( (i < 0) || (i >= _peersByCapability.length) ) {
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("Invalid capability " + c + " (" + i + ")");
-            return null;
-        }
-        return _peersByCapability[i];
+        return _peersByCapability.get(Character.valueOf(c));
     }
 
     public void removeCapabilities(Hash peer) {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Removing capabilities from " + peer.toBase64());
 
-        String oldCaps = (String)_capabilitiesByPeer.remove(peer);
+        String oldCaps = _capabilitiesByPeer.remove(peer);
         if (oldCaps != null) {
             for (int i = 0; i < oldCaps.length(); i++) {
                 char c = oldCaps.charAt(i);
@@ -291,33 +286,13 @@
     ********/
 
     /**
-     * The only user of this is TunnelPeerSelector for unreachables?
+     * @param capability case-insensitive
+     * @return non-null unmodifiable set
      */
-    public List<Hash> getPeersByCapability(char capability) {
-        if (true) {
+    public Set<Hash> getPeersByCapability(char capability) {
             Set<Hash> peers = locked_getPeers(capability);
             if (peers != null)
-                return new ArrayList(peers);
-            return null;
-        } else {
-            // Wow this looks really slow...
-            // What is the point of keeping all the data structures above
-            // if we are going to go through the whole netdb anyway?
-            // Not sure why jrandom switched to do it this way,
-            // the checkin comments aren't clear...
-            // Since the locking is gone, switch back to the above.
-            FloodfillNetworkDatabaseFacade f = (FloodfillNetworkDatabaseFacade)_context.netDb();
-            List<RouterInfo> routerInfos = f.getKnownRouterData();
-            List<Hash> rv = new ArrayList();
-            for (Iterator<RouterInfo> iter = routerInfos.iterator(); iter.hasNext(); ) {
-                RouterInfo ri = iter.next();
-                String caps = ri.getCapabilities();
-                if (caps.indexOf(capability) >= 0)
-                    rv.add(ri.getIdentity().calculateHash());
-            }
-            if (_log.shouldLog(Log.DEBUG))
-                _log.debug("Peers with capacity " + capability + ": " + rv.size());
-            return rv;
-        }
+                return Collections.unmodifiableSet(peers);
+            return Collections.EMPTY_SET;
     }
 }
@@ -10,8 +10,10 @@ package net.i2p.router.peermanager;
 
 import java.io.IOException;
 import java.io.Writer;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Set;
 
 import net.i2p.data.Hash;
 import net.i2p.router.PeerManagerFacade;
@@ -63,10 +65,14 @@ public class PeerManagerFacadeImpl implements PeerManagerFacade {
         return _manager.selectPeers(criteria);
     }
+
+    /**
+     * @param caps non-null
+     */
     public void setCapabilities(Hash peer, String caps) {
         if (_manager == null) return;
         _manager.setCapabilities(peer, caps);
     }
 
     public void removeCapabilities(Hash peer) {
         if (_manager == null) return;
         _manager.removeCapabilities(peer);
@@ -79,8 +85,12 @@ public class PeerManagerFacadeImpl implements PeerManagerFacade {
         return null;
     }
 
-    public List<Hash> getPeersByCapability(char capability) {
-        if (_manager == null) return new ArrayList(0);
+    /**
+     * @param capability case-insensitive
+     * @return non-null unmodifiable set
+     */
+    public Set<Hash> getPeersByCapability(char capability) {
+        if (_manager == null) return Collections.EMPTY_SET;
         return _manager.getPeersByCapability(capability);
     }
 
@@ -46,7 +46,7 @@ public class ProfileOrganizer {
     private final Map<Hash, PeerProfile> _fastPeers;
     /** H(routerIdentity) to PeerProfile for all peers that have high capacities */
     private final Map<Hash, PeerProfile> _highCapacityPeers;
-    /** H(routerIdentity) to PeerProfile for all peers that well integrated into the network and not failing horribly */
+    /** TO BE REMOVED H(routerIdentity) to PeerProfile for all peers that well integrated into the network and not failing horribly */
     private final Map<Hash, PeerProfile> _wellIntegratedPeers;
     /** H(routerIdentity) to PeerProfile for all peers that are not failing horribly */
     private final Map<Hash, PeerProfile> _notFailingPeers;
@@ -187,6 +187,7 @@ public class ProfileOrganizer {
 
     public int countFastPeers() { return count(_fastPeers); }
     public int countHighCapacityPeers() { return count(_highCapacityPeers); }
+    /** @deprecated use ProfileManager.getPeersByCapability('f').size() */
     public int countWellIntegratedPeers() { return count(_wellIntegratedPeers); }
     public int countNotFailingPeers() { return count(_notFailingPeers); }
     public int countFailingPeers() { return count(_failingPeers); }
@@ -408,6 +409,7 @@ public class ProfileOrganizer {
     /**
      * Return a set of Hashes for peers that are well integrated into the network.
      *
+     * @deprecated unused
      */
     public void selectWellIntegratedPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
         selectWellIntegratedPeers(howMany, exclude, matches, 0);
@@ -418,6 +420,7 @@ public class ProfileOrganizer {
      *
      * @param mask 0-4 Number of bytes to match to determine if peers in the same IP range should
      *             not be in the same tunnel. 0 = disable check; 1 = /8; 2 = /16; 3 = /24; 4 = exact IP match
+     * @deprecated unused
      */
     public void selectWellIntegratedPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
         getReadLock();
@@ -2,6 +2,7 @@ package net.i2p.router.tunnel.pool;
 
 import java.math.BigInteger;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashSet;
@@ -198,7 +199,7 @@ public abstract class TunnelPeerSelector {
         if (filterUnreachable(ctx, isInbound, isExploratory)) {
             // NOTE: filterUnreachable returns true for inbound, false for outbound
             // This is the only use for getPeersByCapability? And the whole set of datastructures in PeerManager?
-            List<Hash> caps = ctx.peerManager().getPeersByCapability(Router.CAPABILITY_UNREACHABLE);
+            Collection<Hash> caps = ctx.peerManager().getPeersByCapability(Router.CAPABILITY_UNREACHABLE);
             if (caps != null)
                 peers.addAll(caps);
             caps = ctx.profileOrganizer().selectPeersLocallyUnreachable();
@@ -352,7 +353,7 @@ public abstract class TunnelPeerSelector {
             }
         }
         int maxLen = 0;
-        if (cap.indexOf(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL) >= 0)
+        if (cap.indexOf(FloodfillNetworkDatabaseFacade.CAPABILITY_FLOODFILL) >= 0)
             maxLen++;
         if (cap.indexOf(Router.CAPABILITY_REACHABLE) >= 0)
             maxLen++;