merge of '1b1c377c45e8746e4e04afc47cde55a03b859f32'

and '6869519b0cd38f4bc366d0277c5dc4e924348e66'
This commit is contained in:
z3d
2009-08-28 03:16:12 +00:00
48 changed files with 1014 additions and 144 deletions

View File

@ -1,6 +1,7 @@
package net.i2p.router;
import net.i2p.util.DecayingBloomFilter;
import net.i2p.util.DecayingHashSet;
import net.i2p.util.Log;
/**
@ -95,7 +96,7 @@ public class MessageValidator {
}
public void startup() {
_filter = new DecayingBloomFilter(_context, (int)Router.CLOCK_FUDGE_FACTOR * 2, 8);
_filter = new DecayingHashSet(_context, (int)Router.CLOCK_FUDGE_FACTOR * 2, 8, "RouterMV");
}
void shutdown() {

View File

@ -330,6 +330,7 @@ public class Router {
_context.blocklist().startup();
// let the timestamper get us sync'ed
// this will block for quite a while on a disconnected machine
long before = System.currentTimeMillis();
_context.clock().getTimestamper().waitForInitialization();
long waited = System.currentTimeMillis() - before;

View File

@ -75,19 +75,28 @@ public class RouterContext extends I2PAppContext {
//initAll();
_contexts.add(this);
}
/**
* Set properties where the defaults must be different from those
* in I2PAppContext.
*
* Unless we are explicitly disabling the timestamper, we want to use it.
* We need this now as the new timestamper default is disabled (so we don't
* have each I2PAppContext creating its own SNTP queries all the time)
*
* Set more PRNG buffers, as the default is now small for the I2PAppContext.
*
*/
static final Properties filterProps(Properties envProps) {
private static final Properties filterProps(Properties envProps) {
if (envProps == null)
envProps = new Properties();
if (envProps.getProperty("time.disabled") == null)
envProps.setProperty("time.disabled", "false");
if (envProps.getProperty("prng.buffers") == null)
envProps.setProperty("prng.buffers", "16");
return envProps;
}
public void initAll() {
if ("false".equals(getProperty("i2p.dummyClientFacade", "false")))
_clientManagerFacade = new ClientManagerFacadeImpl(this);

View File

@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 17;
public final static long BUILD = 18;
/** for example "-test" */
public final static String EXTRA = "";
public final static String FULL_VERSION = VERSION + "-" + BUILD + EXTRA;

View File

@ -56,7 +56,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
private DataStore _ds; // hash to DataStructure mapping, persisted when necessary
/** where the data store is pushing the data */
private String _dbDir;
private final Set _exploreKeys = new HashSet(64); // set of Hash objects that we should search on (to fill up a bucket, not to get data)
private final Set<Hash> _exploreKeys = new HashSet(64); // set of Hash objects that we should search on (to fill up a bucket, not to get data)
private boolean _initialized;
/** Clock independent time of when we started up */
private long _started;
@ -72,7 +72,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* removed when the job decides to stop running.
*
*/
private final Map _publishingLeaseSets;
private final Map<Hash, RepublishLeaseSetJob> _publishingLeaseSets;
/**
* Hash of the key currently being searched for, pointing the SearchJob that
@ -80,7 +80,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* added on to the list of jobs fired on success/failure
*
*/
private final Map _activeRequests;
private final Map<Hash, SearchJob> _activeRequests;
/**
* The search for the given key is no longer active
@ -160,7 +160,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_exploreJob.updateExploreSchedule();
}
public Set getExploreKeys() {
public Set<Hash> getExploreKeys() {
if (!_initialized) return null;
synchronized (_exploreKeys) {
return new HashSet(_exploreKeys);
@ -302,12 +302,12 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
/**
* Get the routers closest to that key in response to a remote lookup
*/
public Set findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore) {
public Set<RouterInfo> findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore) {
if (!_initialized) return null;
return getRouters(_peerSelector.selectNearest(key, maxNumRouters, peersToIgnore, _kb));
}
private Set getRouters(Collection hashes) {
private Set<RouterInfo> getRouters(Collection hashes) {
if (!_initialized) return null;
Set rv = new HashSet(hashes.size());
for (Iterator iter = hashes.iterator(); iter.hasNext(); ) {
@ -481,8 +481,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
private static final long PUBLISH_DELAY = 3*1000;
public void publish(LeaseSet localLeaseSet) {
if (!_initialized) return;
if (_context.router().gracefulShutdownInProgress())
return;
Hash h = localLeaseSet.getDestination().calculateHash();
try {
store(h, localLeaseSet);
@ -492,6 +490,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
if (!_context.clientManager().shouldPublishLeaseSet(h))
return;
// If we're exiting, don't publish.
// If we're restarting, keep publishing to minimize the downtime.
if (_context.router().gracefulShutdownInProgress()) {
int code = _context.router().scheduledGracefulExitCode();
if (code == Router.EXIT_GRACEFUL || code == Router.EXIT_HARD)
return;
}
RepublishLeaseSetJob j = null;
synchronized (_publishingLeaseSets) {
@ -855,7 +860,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
return leases;
}
private Set getRouters() {
private Set<RouterInfo> getRouters() {
if (!_initialized) return null;
Set routers = new HashSet();
Set keys = getDataStore().getKeys();

View File

@ -5,6 +5,7 @@ import java.util.Map;
import net.i2p.data.Hash;
import net.i2p.router.RouterContext;
import net.i2p.util.DecayingBloomFilter;
import net.i2p.util.DecayingHashSet;
import net.i2p.util.Log;
/**
@ -52,7 +53,7 @@ public class InboundMessageFragments /*implements UDPTransport.PartialACKSource
// may want to extend the DecayingBloomFilter so we can use a smaller
// array size (currently its tuned for 10 minute rates for the
// messageValidator)
_recentlyCompletedMessages = new DecayingBloomFilter(_context, DECAY_PERIOD, 4);
_recentlyCompletedMessages = new DecayingHashSet(_context, DECAY_PERIOD, 4, "UDPIMF");
_ackSender.startup();
_messageReceiver.startup();
}

View File

@ -1,10 +1,11 @@
package net.i2p.router.tunnel;
import net.i2p.I2PAppContext;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
import net.i2p.router.RouterContext;
import net.i2p.util.ByteCache;
import net.i2p.util.DecayingBloomFilter;
import net.i2p.util.DecayingHashSet;
/**
* Manage the IV validation for all of the router's tunnels by way of a big
@ -12,7 +13,7 @@ import net.i2p.util.DecayingBloomFilter;
*
*/
public class BloomFilterIVValidator implements IVValidator {
private I2PAppContext _context;
private RouterContext _context;
private DecayingBloomFilter _filter;
private ByteCache _ivXorCache = ByteCache.getInstance(32, HopProcessor.IV_LENGTH);
@ -23,9 +24,17 @@ public class BloomFilterIVValidator implements IVValidator {
*
*/
private static final int HALFLIFE_MS = 10*60*1000;
public BloomFilterIVValidator(I2PAppContext ctx, int KBps) {
private static final int MIN_SHARE_KBPS_TO_USE_BLOOM = 64;
public BloomFilterIVValidator(RouterContext ctx, int KBps) {
_context = ctx;
_filter = new DecayingBloomFilter(ctx, HALFLIFE_MS, 16);
// Select the filter based on share bandwidth.
// Note that at rates approaching 1MB, we need to do something else,
// as the Bloom filter false positive rates approach 0.1%. FIXME
if (getShareBandwidth(ctx) < MIN_SHARE_KBPS_TO_USE_BLOOM)
_filter = new DecayingHashSet(ctx, HALFLIFE_MS, 16, "TunnelIVV"); // appx. 4MB max
else
_filter = new DecayingBloomFilter(ctx, HALFLIFE_MS, 16, "TunnelIVV"); // 2MB fixed
ctx.statManager().createRateStat("tunnel.duplicateIV", "Note that a duplicate IV was received", "Tunnels",
new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
}
@ -39,4 +48,11 @@ public class BloomFilterIVValidator implements IVValidator {
return !dup; // return true if it is OK, false if it isn't
}
/** Stop the underlying filter's decay timer; presumably called when this validator is retired. */
public void destroy() { _filter.stopDecaying(); }
/**
 * Compute the router's share bandwidth in KBps.
 *
 * The share rate is the configured share percentage applied to the
 * smaller of the inbound and outbound rate limits.
 *
 * @param ctx the router context providing the bandwidth limiter and router config
 * @return the share bandwidth in KBytes per second, truncated to an int
 */
private static int getShareBandwidth(RouterContext ctx) {
    final int inbound = ctx.bandwidthLimiter().getInboundKBytesPerSecond();
    final int outbound = ctx.bandwidthLimiter().getOutboundKBytesPerSecond();
    final double sharePct = ctx.router().getSharePercentage();
    final int limit = Math.min(inbound, outbound);
    return (int) (sharePct * limit);
}
}

View File

@ -10,6 +10,7 @@ import net.i2p.data.SessionKey;
import net.i2p.data.i2np.BuildRequestRecord;
import net.i2p.data.i2np.TunnelBuildMessage;
import net.i2p.util.DecayingBloomFilter;
import net.i2p.util.DecayingHashSet;
import net.i2p.util.Log;
/**
@ -22,7 +23,7 @@ public class BuildMessageProcessor {
private DecayingBloomFilter _filter;
public BuildMessageProcessor(I2PAppContext ctx) {
_filter = new DecayingBloomFilter(ctx, 60*1000, 32);
_filter = new DecayingHashSet(ctx, 60*1000, 32, "TunnelBMP");
ctx.statManager().createRateStat("tunnel.buildRequestDup", "How frequently we get dup build request messages", "Tunnels", new long[] { 60*1000, 10*60*1000 });
}
/**

View File

@ -1,18 +1,19 @@
package net.i2p.router.tunnel;
import java.util.HashSet;
import java.util.Set;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
import net.i2p.util.ConcurrentHashSet;
/**
* waste lots of RAM
*/
class HashSetIVValidator implements IVValidator {
private final HashSet _received;
private final Set<ByteArray> _received;
public HashSetIVValidator() {
_received = new HashSet();
_received = new ConcurrentHashSet();
}
public boolean receiveIV(byte ivData[], int ivOffset, byte payload[], int payloadOffset) {
@ -21,10 +22,7 @@ class HashSetIVValidator implements IVValidator {
byte iv[] = new byte[HopProcessor.IV_LENGTH];
DataHelper.xor(ivData, ivOffset, payload, payloadOffset, iv, 0, HopProcessor.IV_LENGTH);
ByteArray ba = new ByteArray(iv);
boolean isNew = false;
synchronized (_received) {
isNew = _received.add(ba);
}
boolean isNew = _received.add(ba);
return isNew;
}
}
}

View File

@ -35,7 +35,7 @@ public class InboundGatewayReceiver implements TunnelGateway.Receiver {
}
}
if (_context.tunnelDispatcher().shouldDropParticipatingMessage())
if (_context.tunnelDispatcher().shouldDropParticipatingMessage("IBGW", encrypted.length))
return -1;
_config.incrementSentMessages();
TunnelDataMessage msg = new TunnelDataMessage(_context);

View File

@ -43,7 +43,7 @@ public class OutboundTunnelEndpoint {
+ (toTunnel != null ? toTunnel.getTunnelId() + "" : ""));
// don't drop it if we are the target
if ((!_context.routerHash().equals(toRouter)) &&
_context.tunnelDispatcher().shouldDropParticipatingMessage())
_context.tunnelDispatcher().shouldDropParticipatingMessage("OBEP " + msg.getType(), msg.getMessageSize()))
return;
_config.incrementSentMessages();
_outDistributor.distribute(msg, toRouter, toTunnel);

View File

@ -540,8 +540,23 @@ public class TunnelDispatcher implements Service {
* This is similar to the code in ../RouterThrottleImpl.java
* We drop in proportion to how far over the limit we are.
* Perhaps an exponential function would be better?
*
* The drop probability is adjusted for the size of the message.
* At this stage, participants and IBGWs see a standard 1024 byte message.
* OBEPs however may see a wide variety of sizes.
*
* Network-wise, it's most efficient to drop OBEP messages, because they
* are unfragmented and we know their size. Therefore we drop the big ones
* and we drop a single wrapped I2CP message, not a fragment of one or more messages.
* Also, the OBEP is the earliest identifiable hop in the message's path
* (a plain participant could be earlier or later, but on average is later)
*
* @param type message hop location and type
* @param length the length of the message
*/
public boolean shouldDropParticipatingMessage() {
public boolean shouldDropParticipatingMessage(String type, int length) {
if (length <= 0)
return false;
RateStat rs = _context.statManager().getRate("tunnel.participatingBandwidth");
if (rs == null)
return false;
@ -574,13 +589,26 @@ public class TunnelDispatcher implements Service {
float pctDrop = (used - maxBps) / used;
if (pctDrop <= 0)
return false;
// increase the drop probability for OBEP,
// and lower it for IBGW, for network efficiency
double len = length;
if (type.startsWith("OBEP"))
len *= 1.5;
else if (type.startsWith("IBGW"))
len /= 1.5;
// drop in proportion to size w.r.t. a standard 1024-byte message
// this is a little expensive but we want to adjust the curve between 0 and 1
// Most messages are 1024, only at the OBEP do we see other sizes
if (len != 1024d)
pctDrop = (float) Math.pow(pctDrop, 1024d / len);
float rand = _context.random().nextFloat();
boolean reject = rand <= pctDrop;
if (reject) {
if (_log.shouldLog(Log.WARN)) {
int availBps = (int) (((maxKBps*1024)*share) - used);
_log.warn("Drop part. msg. avail/max/used " + availBps + "/" + (int) maxBps + "/"
+ used + " %Drop = " + pctDrop);
+ used + " %Drop = " + pctDrop
+ ' ' + type + ' ' + length);
}
_context.statManager().addRateData("tunnel.participatingMessageDropped", 1, 0);
}

View File

@ -150,7 +150,7 @@ public class TunnelParticipant {
}
private void send(HopConfig config, TunnelDataMessage msg, RouterInfo ri) {
if (_context.tunnelDispatcher().shouldDropParticipatingMessage())
if (_context.tunnelDispatcher().shouldDropParticipatingMessage("TDM", 1024))
return;
_config.incrementSentMessages();
long oldId = msg.getUniqueId();

View File

@ -31,6 +31,7 @@ class TestJob extends JobImpl {
/** base to randomize the test delay on */
private static final int TEST_DELAY = 30*1000;
private static final long[] RATES = { 60*1000, 10*60*1000l, 60*60*1000l };
public TestJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, TunnelPool pool) {
super(ctx);
@ -43,19 +44,19 @@ class TestJob extends JobImpl {
_log.error("Invalid tunnel test configuration: no pool for " + cfg, new Exception("origin"));
getTiming().setStartAfter(getDelay() + ctx.clock().now());
ctx.statManager().createRateStat("tunnel.testFailedTime", "How long did the failure take (max of 60s for full timeout)?", "Tunnels",
new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
RATES);
ctx.statManager().createRateStat("tunnel.testExploratoryFailedTime", "How long did the failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
RATES);
ctx.statManager().createRateStat("tunnel.testFailedCompletelyTime", "How long did the complete failure take (max of 60s for full timeout)?", "Tunnels",
new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
RATES);
ctx.statManager().createRateStat("tunnel.testExploratoryFailedCompletelyTime", "How long did the complete failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels",
new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
RATES);
ctx.statManager().createRateStat("tunnel.testSuccessLength", "How long were the tunnels that passed the test?", "Tunnels",
new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
RATES);
ctx.statManager().createRateStat("tunnel.testSuccessTime", "How long did tunnel testing take?", "Tunnels",
new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
RATES);
ctx.statManager().createRateStat("tunnel.testAborted", "Tunnel test could not occur, since there weren't any tunnels to test with", "Tunnels",
new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
RATES);
}
public String getName() { return "Test tunnel"; }
public void runJob() {
@ -69,6 +70,8 @@ class TestJob extends JobImpl {
scheduleRetest();
return;
}
if (getContext().router().gracefulShutdownInProgress())
return; // don't reschedule
_found = false;
// note: testing with exploratory tunnels always, even if the tested tunnel
// is a client tunnel (per _cfg.getDestination())

View File

@ -37,9 +37,9 @@ public class TunnelPoolManager implements TunnelManagerFacade {
private RouterContext _context;
private Log _log;
/** Hash (destination) to TunnelPool */
private final Map _clientInboundPools;
private final Map<Hash, TunnelPool> _clientInboundPools;
/** Hash (destination) to TunnelPool */
private final Map _clientOutboundPools;
private final Map<Hash, TunnelPool> _clientOutboundPools;
private TunnelPool _inboundExploratory;
private TunnelPool _outboundExploratory;
private BuildExecutor _executor;
@ -90,7 +90,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
if (destination == null) return selectInboundTunnel();
TunnelPool pool = null;
synchronized (_clientInboundPools) {
pool = (TunnelPool)_clientInboundPools.get(destination);
pool = _clientInboundPools.get(destination);
}
if (pool != null) {
return pool.selectTunnel();
@ -119,7 +119,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
if (destination == null) return selectOutboundTunnel();
TunnelPool pool = null;
synchronized (_clientOutboundPools) {
pool = (TunnelPool)_clientOutboundPools.get(destination);
pool = _clientOutboundPools.get(destination);
}
if (pool != null) {
return pool.selectTunnel();
@ -130,8 +130,8 @@ public class TunnelPoolManager implements TunnelManagerFacade {
public TunnelInfo getTunnelInfo(TunnelId id) {
TunnelInfo info = null;
synchronized (_clientInboundPools) {
for (Iterator iter = _clientInboundPools.values().iterator(); iter.hasNext(); ) {
TunnelPool pool = (TunnelPool)iter.next();
for (Iterator<TunnelPool> iter = _clientInboundPools.values().iterator(); iter.hasNext(); ) {
TunnelPool pool = iter.next();
info = pool.getTunnel(id);
if (info != null)
return info;
@ -166,7 +166,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
Hash client = (Hash)destinations.get(i);
TunnelPool pool = null;
synchronized (_clientInboundPools) {
pool = (TunnelPool)_clientInboundPools.get(client);
pool = _clientInboundPools.get(client);
}
count += pool.listTunnels().size();
}
@ -182,7 +182,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
Hash client = (Hash)destinations.get(i);
TunnelPool pool = null;
synchronized (_clientOutboundPools) {
pool = (TunnelPool)_clientOutboundPools.get(client);
pool = _clientOutboundPools.get(client);
}
count += pool.listTunnels().size();
}
@ -196,9 +196,9 @@ public class TunnelPoolManager implements TunnelManagerFacade {
return false;
TunnelPool pool;
if (tunnel.isInbound())
pool = (TunnelPool)_clientInboundPools.get(client);
pool = _clientInboundPools.get(client);
else
pool = (TunnelPool)_clientOutboundPools.get(client);
pool = _clientOutboundPools.get(client);
if (pool == null)
return false;
return pool.listTunnels().contains(tunnel);
@ -211,7 +211,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
public TunnelPoolSettings getInboundSettings(Hash client) {
TunnelPool pool = null;
synchronized (_clientInboundPools) {
pool = (TunnelPool)_clientInboundPools.get(client);
pool = _clientInboundPools.get(client);
}
if (pool != null)
return pool.getSettings();
@ -221,7 +221,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
public TunnelPoolSettings getOutboundSettings(Hash client) {
TunnelPool pool = null;
synchronized (_clientOutboundPools) {
pool = (TunnelPool)_clientOutboundPools.get(client);
pool = _clientOutboundPools.get(client);
}
if (pool != null)
return pool.getSettings();
@ -234,10 +234,10 @@ public class TunnelPoolManager implements TunnelManagerFacade {
public void setOutboundSettings(Hash client, TunnelPoolSettings settings) {
setSettings(_clientOutboundPools, client, settings);
}
private void setSettings(Map pools, Hash client, TunnelPoolSettings settings) {
private void setSettings(Map<Hash, TunnelPool> pools, Hash client, TunnelPoolSettings settings) {
TunnelPool pool = null;
synchronized (pools) {
pool = (TunnelPool)pools.get(client);
pool = pools.get(client);
}
if (pool != null) {
settings.setDestination(client); // prevent spoofing or unset dest
@ -260,7 +260,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
TunnelPool outbound = null;
// should we share the clientPeerSelector across both inbound and outbound?
synchronized (_clientInboundPools) {
inbound = (TunnelPool)_clientInboundPools.get(dest);
inbound = _clientInboundPools.get(dest);
if (inbound == null) {
inbound = new TunnelPool(_context, this, settings.getInboundSettings(),
new ClientPeerSelector());
@ -270,7 +270,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
}
synchronized (_clientOutboundPools) {
outbound = (TunnelPool)_clientOutboundPools.get(dest);
outbound = _clientOutboundPools.get(dest);
if (outbound == null) {
outbound = new TunnelPool(_context, this, settings.getOutboundSettings(),
new ClientPeerSelector());
@ -294,10 +294,10 @@ public class TunnelPoolManager implements TunnelManagerFacade {
TunnelPool inbound = null;
TunnelPool outbound = null;
synchronized (_clientInboundPools) {
inbound = (TunnelPool)_clientInboundPools.remove(destination);
inbound = _clientInboundPools.remove(destination);
}
synchronized (_clientOutboundPools) {
outbound = (TunnelPool)_clientOutboundPools.remove(destination);
outbound = _clientOutboundPools.remove(destination);
}
if (inbound != null)
inbound.shutdown();
@ -305,20 +305,24 @@ public class TunnelPoolManager implements TunnelManagerFacade {
outbound.shutdown();
}
/** queue a recurring test job if appropriate */
void buildComplete(PooledTunnelCreatorConfig cfg) {
buildComplete();
if (cfg.getLength() > 1) {
//buildComplete();
if (cfg.getLength() > 1 &&
!_context.router().gracefulShutdownInProgress()) {
TunnelPool pool = cfg.getTunnelPool();
if (pool == null) {
// never seen this before, do we really need to bother
// trying so hard to find his pool?
_log.error("How does this not have a pool? " + cfg, new Exception("baf"));
if (cfg.getDestination() != null) {
if (cfg.isInbound()) {
synchronized (_clientInboundPools) {
pool = (TunnelPool)_clientInboundPools.get(cfg.getDestination());
pool = _clientInboundPools.get(cfg.getDestination());
}
} else {
synchronized (_clientOutboundPools) {
pool = (TunnelPool)_clientOutboundPools.get(cfg.getDestination());
pool = _clientOutboundPools.get(cfg.getDestination());
}
}
} else {
@ -333,6 +337,8 @@ public class TunnelPoolManager implements TunnelManagerFacade {
_context.jobQueue().addJob(new TestJob(_context, cfg, pool));
}
}
/** ?? */
void buildComplete() {}
public void startup() {
@ -384,7 +390,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
/** list of TunnelPool instances currently in play */
void listPools(List out) {
void listPools(List<TunnelPool> out) {
synchronized (_clientInboundPools) {
out.addAll(_clientInboundPools.values());
}
@ -407,19 +413,19 @@ public class TunnelPoolManager implements TunnelManagerFacade {
out.write("<div class=\"wideload\"><h2><a name=\"exploratory\" ></a>Exploratory tunnels (<a href=\"/configtunnels.jsp#exploratory\">config</a>):</h2>\n");
renderPool(out, _inboundExploratory, _outboundExploratory);
List destinations = null;
List<Hash> destinations = null;
synchronized (_clientInboundPools) {
destinations = new ArrayList(_clientInboundPools.keySet());
}
for (int i = 0; i < destinations.size(); i++) {
Hash client = (Hash)destinations.get(i);
Hash client = destinations.get(i);
TunnelPool in = null;
TunnelPool outPool = null;
synchronized (_clientInboundPools) {
in = (TunnelPool)_clientInboundPools.get(client);
in = _clientInboundPools.get(client);
}
synchronized (_clientOutboundPools) {
outPool = (TunnelPool)_clientOutboundPools.get(client);
outPool = _clientOutboundPools.get(client);
}
String name = (in != null ? in.getSettings().getDestinationNickname() : null);
if ( (name == null) && (outPool != null) )
@ -505,7 +511,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
private void renderPool(Writer out, TunnelPool in, TunnelPool outPool) throws IOException {
List tunnels = null;
List<TunnelInfo> tunnels = null;
if (in == null)
tunnels = new ArrayList();
else
@ -519,7 +525,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
int live = 0;
int maxLength = 1;
for (int i = 0; i < tunnels.size(); i++) {
TunnelInfo info = (TunnelInfo)tunnels.get(i);
TunnelInfo info = tunnels.get(i);
if (info.getLength() > maxLength)
maxLength = info.getLength();
}
@ -536,7 +542,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
out.write("</tr>\n");
for (int i = 0; i < tunnels.size(); i++) {
TunnelInfo info = (TunnelInfo)tunnels.get(i);
TunnelInfo info = tunnels.get(i);
long timeLeft = info.getExpiration()-_context.clock().now();
if (timeLeft <= 0)
continue; // don't display tunnels in their grace period