Make TunnelPool final in PooledTunnelCreatorConfig

Don't pass around both pool and cfg in args
Remove unused methods
Cleanup multiple now() and getSettings() calls
Author: zzz
Date: 2019-06-07 15:19:06 +00:00
parent e50bf00fa8
commit 1a200a16cc
8 changed files with 66 additions and 118 deletions
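
The changes below repeatedly collapse (pool, cfg) argument pairs into cfg alone, since the config now carries a final reference to its pool. A minimal, self-contained sketch of the resulting shape, using simplified stand-in classes rather than the real I2P types:

// Sketch only: Pool/Config/Executor are simplified stand-ins for
// TunnelPool / PooledTunnelCreatorConfig / BuildExecutor.
public class FinalPoolSketch {

    static final class Pool {
        private final java.util.List<Config> tunnels = new java.util.ArrayList<>();
        void buildComplete(Config cfg) { tunnels.add(cfg); } // bookkeeping stays with the pool
        int size() { return tunnels.size(); }
    }

    static final class Config {
        private final Pool pool; // set once in the constructor, never reassigned

        Config(Pool pool) {
            if (pool == null)
                throw new IllegalArgumentException("pool is required"); // replaces the old setTunnelPool() null handling
            this.pool = pool;
        }

        Pool getTunnelPool() { return pool; }
    }

    static final class Executor {
        // Old shape: buildComplete(cfg, pool). New shape: the pool is derived from the cfg.
        void buildComplete(Config cfg) {
            cfg.getTunnelPool().buildComplete(cfg);
        }
    }

    public static void main(String[] args) {
        Pool pool = new Pool();
        Config cfg = new Config(pool);
        new Executor().buildComplete(cfg);
        System.out.println("tunnels in pool: " + pool.size()); // prints 1
    }
}

Making the pool a final constructor argument also removes the window where a config could exist without a pool, which is what the deleted null-recovery code in TunnelPoolManager used to guard against.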

net/i2p/router/tunnel/TunnelCreatorConfig.java

@@ -16,8 +16,9 @@ import net.i2p.router.TunnelInfo;
* Coordinate the info that the tunnel creator keeps track of, including what
* peers are in the tunnel and what their configuration is
*
+ * See PooledTunnelCreatorConfig for the non-abstract class
*/
- public class TunnelCreatorConfig implements TunnelInfo {
+ public abstract class TunnelCreatorConfig implements TunnelInfo {
protected final RouterContext _context;
/** only necessary for client tunnels */
private final Hash _destination;

net/i2p/router/tunnel/pool/BuildExecutor.java

@@ -415,7 +415,7 @@ class BuildExecutor implements Runnable {
_context.statManager().addRateData("tunnel.buildConfigTime", pTime, 0);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Configuring new tunnel " + i + " for " + pool + ": " + cfg);
- buildTunnel(pool, cfg);
+ buildTunnel(cfg);
//realBuilt++;
} else {
i--;
@@ -507,7 +507,7 @@
if (cfg != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Configuring short tunnel " + i + " for " + pool + ": " + cfg);
- buildTunnel(pool, cfg);
+ buildTunnel(cfg);
if (cfg.getLength() > 1) {
allowed--; // oops... shouldn't have done that, but hey, its not that bad...
}
@@ -524,7 +524,7 @@
public boolean isRunning() { return _isRunning; }
- void buildTunnel(TunnelPool pool, PooledTunnelCreatorConfig cfg) {
+ void buildTunnel(PooledTunnelCreatorConfig cfg) {
long beforeBuild = System.currentTimeMillis();
if (cfg.getLength() > 1) {
do {
@@ -532,7 +532,7 @@
cfg.setReplyMessageId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
} while (addToBuilding(cfg)); // if a dup, go araound again
}
- boolean ok = BuildRequestor.request(_context, pool, cfg, this);
+ boolean ok = BuildRequestor.request(_context, cfg, this);
if (!ok)
return;
if (cfg.getLength() > 1) {
@@ -556,10 +556,10 @@
* This wakes up the executor, so call this after TunnelPool.addTunnel()
* so we don't build too many.
*/
- public void buildComplete(PooledTunnelCreatorConfig cfg, TunnelPool pool) {
+ public void buildComplete(PooledTunnelCreatorConfig cfg) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Build complete for " + cfg, new Exception());
- pool.buildComplete(cfg);
+ cfg.getTunnelPool().buildComplete(cfg);
if (cfg.getLength() > 1)
removeFromBuilding(cfg.getReplyMessageId());
// Only wake up the build thread if it took a reasonable amount of time -

net/i2p/router/tunnel/pool/BuildHandler.java

@@ -322,7 +322,7 @@ class BuildHandler implements Runnable {
if (record < 0) {
_log.error("Bad status index " + i);
// don't leak
- _exec.buildComplete(cfg, cfg.getTunnelPool());
+ _exec.buildComplete(cfg);
return;
}
@@ -379,14 +379,14 @@
// This will happen very rarely. We check for dups when
// creating the config, but we don't track IDs for builds in progress.
_context.statManager().addRateData("tunnel.ownDupID", 1);
- _exec.buildComplete(cfg, cfg.getTunnelPool());
+ _exec.buildComplete(cfg);
if (_log.shouldLog(Log.WARN))
_log.warn("Dup ID for our own tunnel " + cfg);
return;
}
cfg.getTunnelPool().addTunnel(cfg); // self.self.self.foo!
// call buildComplete() after addTunnel() so we don't try another build.
- _exec.buildComplete(cfg, cfg.getTunnelPool());
+ _exec.buildComplete(cfg);
_exec.buildSuccessful(cfg);
if (cfg.getTunnelPool().getSettings().isExploratory()) {
@@ -421,8 +421,7 @@
}
}
- ExpireJob expireJob = new ExpireJob(_context, cfg, cfg.getTunnelPool());
- cfg.setExpireJob(expireJob);
+ ExpireJob expireJob = new ExpireJob(_context, cfg);
_context.jobQueue().addJob(expireJob);
if (cfg.getDestination() == null)
_context.statManager().addRateData("tunnel.buildExploratorySuccess", rtt);
@@ -430,7 +429,7 @@
_context.statManager().addRateData("tunnel.buildClientSuccess", rtt);
} else {
// someone is no fun
- _exec.buildComplete(cfg, cfg.getTunnelPool());
+ _exec.buildComplete(cfg);
if (cfg.getDestination() == null)
_context.statManager().addRateData("tunnel.buildExploratoryReject", rtt);
else
@@ -441,7 +440,7 @@
_log.warn(msg.getUniqueId() + ": Tunnel reply could not be decrypted for tunnel " + cfg);
_context.statManager().addRateData("tunnel.corruptBuildReply", 1);
// don't leak
- _exec.buildComplete(cfg, cfg.getTunnelPool());
+ _exec.buildComplete(cfg);
}
}
@@ -723,8 +722,9 @@ class BuildHandler implements Runnable {
// tunnel-alt-creation.html specifies that this is enforced +/- 1 hour but it was not.
// As of 0.9.16, allow + 5 minutes to - 65 minutes.
long time = req.readRequestTime();
- long now = (_context.clock().now() / (60l*60l*1000l)) * (60*60*1000);
- long timeDiff = now - time;
+ long now = _context.clock().now();
+ long roundedNow = (now / (60l*60l*1000l)) * (60*60*1000);
+ long timeDiff = roundedNow - time;
if (timeDiff > MAX_REQUEST_AGE) {
_context.statManager().addRateData("tunnel.rejectTooOld", 1);
if (_log.shouldLog(Log.WARN))
@@ -763,7 +763,7 @@
//if ( (response == 0) && (_context.random().nextInt(50) <= 1) )
// response = TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
- long recvDelay = _context.clock().now()-state.recvTime;
+ long recvDelay = now - state.recvTime;
if (response == 0) {
// unused
@@ -833,8 +833,8 @@
HopConfig cfg = null;
if (response == 0) {
cfg = new HopConfig();
- cfg.setCreation(_context.clock().now());
- cfg.setExpiration(_context.clock().now() + 10*60*1000);
+ cfg.setCreation(now);
+ cfg.setExpiration(now + 10*60*1000);
cfg.setIVKey(req.readIVKey());
cfg.setLayerKey(req.readLayerKey());
if (isInGW) {
@@ -935,7 +935,7 @@
+ " recvDelay " + recvDelay + " replyMessage " + req.readReplyMessageId());
// now actually send the response
- long expires = _context.clock().now() + NEXT_HOP_SEND_TIMEOUT;
+ long expires = now + NEXT_HOP_SEND_TIMEOUT;
if (!isOutEnd) {
state.msg.setUniqueId(req.readReplyMessageId());
state.msg.setMessageExpiration(expires);
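
The BuildHandler hunks above read the clock once into a local now and reuse it; for the request-age check, that value is additionally rounded down to the hour before being compared with the hour stamp carried in the build request. A small stand-alone sketch of that check; the +5/-65 minute window and the constant values are taken from the comment above and are illustrative, not copied from BuildHandler:

// Hypothetical helper, not the real BuildHandler code.
public class RequestAgeSketch {
    private static final long HOUR = 60 * 60 * 1000L;
    private static final long MAX_REQUEST_AGE = 65 * 60 * 1000L;    // oldest acceptable request hour (assumed value)
    private static final long MAX_REQUEST_FUTURE = 5 * 60 * 1000L;  // furthest-future acceptable request hour (assumed value)

    // requestTime is the hour-granularity timestamp read from the request
    static boolean isAcceptable(long now, long requestTime) {
        long roundedNow = (now / HOUR) * HOUR;   // same rounding as the diff above
        long timeDiff = roundedNow - requestTime;
        return timeDiff <= MAX_REQUEST_AGE && timeDiff >= -MAX_REQUEST_FUTURE;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long thisHour = (now / HOUR) * HOUR;
        System.out.println(isAcceptable(now, thisHour));            // true
        System.out.println(isAcceptable(now, thisHour - 2 * HOUR)); // false: too old
        System.out.println(isAcceptable(now, thisHour + HOUR));     // false: too far in the future
    }
}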

net/i2p/router/tunnel/pool/BuildRequestor.java

@@ -16,6 +16,7 @@ import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelManagerFacade;
+ import net.i2p.router.TunnelPoolSettings;
import net.i2p.router.tunnel.BuildMessageGenerator;
import net.i2p.util.Log;
import net.i2p.util.VersionComparator;
@@ -117,24 +118,25 @@ abstract class BuildRequestor {
* @param cfg ReplyMessageId must be set
* @return success
*/
- public static boolean request(RouterContext ctx, TunnelPool pool,
+ public static boolean request(RouterContext ctx,
PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
// new style crypto fills in all the blanks, while the old style waits for replies to fill in the next hop, etc
prepare(ctx, cfg);
if (cfg.getLength() <= 1) {
- buildZeroHop(ctx, pool, cfg, exec);
+ buildZeroHop(ctx, cfg, exec);
return true;
}
Log log = ctx.logManager().getLog(BuildRequestor.class);
- cfg.setTunnelPool(pool);
+ final TunnelPool pool = cfg.getTunnelPool();
+ final TunnelPoolSettings settings = pool.getSettings();
TunnelInfo pairedTunnel = null;
Hash farEnd = cfg.getFarEnd();
TunnelManagerFacade mgr = ctx.tunnelManager();
- boolean isInbound = pool.getSettings().isInbound();
- if (pool.getSettings().isExploratory() || !usePairedTunnels(ctx)) {
+ boolean isInbound = settings.isInbound();
+ if (settings.isExploratory() || !usePairedTunnels(ctx)) {
if (isInbound)
pairedTunnel = mgr.selectOutboundExploratoryTunnel(farEnd);
else
@@ -142,9 +144,9 @@ abstract class BuildRequestor {
} else {
// building a client tunnel
if (isInbound)
- pairedTunnel = mgr.selectOutboundTunnel(pool.getSettings().getDestination(), farEnd);
+ pairedTunnel = mgr.selectOutboundTunnel(settings.getDestination(), farEnd);
else
- pairedTunnel = mgr.selectInboundTunnel(pool.getSettings().getDestination(), farEnd);
+ pairedTunnel = mgr.selectInboundTunnel(settings.getDestination(), farEnd);
if (pairedTunnel == null) {
if (isInbound) {
// random more reliable than closest ??
@@ -178,12 +180,12 @@
if (pairedTunnel == null) {
if (log.shouldLog(Log.WARN))
log.warn("Tunnel build failed, as we couldn't find a paired tunnel for " + cfg);
- exec.buildComplete(cfg, pool);
+ exec.buildComplete(cfg);
// Not even an exploratory tunnel? We are in big trouble.
// Let's not spin through here too fast.
// But don't let a client tunnel waiting for exploratories slow things down too much,
// as there may be other tunnel pools who can build
- int ms = pool.getSettings().isExploratory() ? 250 : 25;
+ int ms = settings.isExploratory() ? 250 : 25;
try { Thread.sleep(ms); } catch (InterruptedException ie) {}
return false;
}
@@ -194,7 +196,7 @@
if (msg == null) {
if (log.shouldLog(Log.WARN))
log.warn("Tunnel build failed, as we couldn't create the tunnel build message for " + cfg);
- exec.buildComplete(cfg, pool);
+ exec.buildComplete(cfg);
return false;
}
@@ -225,11 +227,11 @@
if (peer == null) {
if (log.shouldLog(Log.WARN))
log.warn("Could not find the next hop to send the outbound request to: " + cfg);
- exec.buildComplete(cfg, pool);
+ exec.buildComplete(cfg);
return false;
}
OutNetMessage outMsg = new OutNetMessage(ctx, msg, ctx.clock().now() + FIRST_HOP_TIMEOUT, PRIORITY, peer);
- outMsg.setOnFailedSendJob(new TunnelBuildFirstHopFailJob(ctx, pool, cfg, exec));
+ outMsg.setOnFailedSendJob(new TunnelBuildFirstHopFailJob(ctx, cfg, exec));
try {
ctx.outNetMessagePool().add(outMsg);
} catch (RuntimeException re) {
@@ -365,20 +367,19 @@ keep this here for the next time we change the build protocol
return msg;
}
- private static void buildZeroHop(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
+ private static void buildZeroHop(RouterContext ctx, PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
Log log = ctx.logManager().getLog(BuildRequestor.class);
if (log.shouldLog(Log.DEBUG))
log.debug("Build zero hop tunnel " + cfg);
- exec.buildComplete(cfg, pool);
+ exec.buildComplete(cfg);
if (cfg.isInbound())
ctx.tunnelDispatcher().joinInbound(cfg);
else
ctx.tunnelDispatcher().joinOutbound(cfg);
- pool.addTunnel(cfg);
+ cfg.getTunnelPool().addTunnel(cfg);
exec.buildSuccessful(cfg);
- ExpireJob expireJob = new ExpireJob(ctx, cfg, pool);
- cfg.setExpireJob(expireJob);
+ ExpireJob expireJob = new ExpireJob(ctx, cfg);
ctx.jobQueue().addJob(expireJob);
// can it get much easier?
}
@@ -393,18 +394,16 @@ keep this here for the next time we change the build protocol
* Can't do this for inbound tunnels since the msg goes out an expl. tunnel.
*/
private static class TunnelBuildFirstHopFailJob extends JobImpl {
- private final TunnelPool _pool;
private final PooledTunnelCreatorConfig _cfg;
private final BuildExecutor _exec;
- private TunnelBuildFirstHopFailJob(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
+ private TunnelBuildFirstHopFailJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, BuildExecutor exec) {
super(ctx);
_cfg = cfg;
_exec = exec;
- _pool = pool;
}
public String getName() { return "Timeout contacting first peer for OB tunnel"; }
public void runJob() {
- _exec.buildComplete(_cfg, _pool);
+ _exec.buildComplete(_cfg);
getContext().profileManager().tunnelTimedOut(_cfg.getPeer(1));
getContext().statManager().addRateData("tunnel.buildFailFirstHop", 1, 0);
// static, no _log

net/i2p/router/tunnel/pool/ExpireJob.java

@@ -5,7 +5,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
- import net.i2p.router.tunnel.TunnelCreatorConfig;
/**
* This runs twice for each tunnel.
@@ -13,17 +12,15 @@ import net.i2p.router.tunnel.TunnelCreatorConfig;
* The second time, stop accepting data for it.
*/
class ExpireJob extends JobImpl {
- private final TunnelPool _pool;
- private final TunnelCreatorConfig _cfg;
+ private final PooledTunnelCreatorConfig _cfg;
private final AtomicBoolean _leaseUpdated = new AtomicBoolean(false);
private final long _dropAfter;
private static final long OB_EARLY_EXPIRE = 30*1000;
private static final long IB_EARLY_EXPIRE = OB_EARLY_EXPIRE + 7500;
- public ExpireJob(RouterContext ctx, TunnelCreatorConfig cfg, TunnelPool pool) {
+ public ExpireJob(RouterContext ctx, PooledTunnelCreatorConfig cfg) {
super(ctx);
- _pool = pool;
_cfg = cfg;
// we act as if this tunnel expires a random skew before it actually does
// so we rebuild out of sync. otoh, we will honor tunnel messages on it
@@ -31,7 +28,7 @@ class ExpireJob extends JobImpl {
// others may be sending to the published lease expirations
// Also skew the inbound away from the outbound
long expire = cfg.getExpiration();
- if (_pool.getSettings().isInbound()) {
+ if (cfg.getTunnelPool().getSettings().isInbound()) {
// wait extra long for IB so we don't drop msgs that
// got all the way to us.
_dropAfter = expire + (2 * Router.CLOCK_FUDGE_FACTOR);
@@ -51,10 +48,11 @@ class ExpireJob extends JobImpl {
public void runJob() {
if (_leaseUpdated.compareAndSet(false,true)) {
+ TunnelPool pool = _cfg.getTunnelPool();
// First run
- _pool.removeTunnel(_cfg);
+ pool.removeTunnel(_cfg);
// noop for outbound
- _pool.refreshLeaseSet();
+ pool.refreshLeaseSet();
long timeToDrop = _dropAfter - getContext().clock().now();
requeue(timeToDrop);
} else {
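
The ExpireJob diff above keeps the two-phase behavior described in its class comment: on the first run the tunnel is removed from its pool (and the leaseset refreshed) while the router still honors traffic on it, and the job requeues itself for the later drop time; on the second run the tunnel is actually dropped. A simplified, self-contained sketch of that AtomicBoolean idiom, not the real ExpireJob:

import java.util.concurrent.atomic.AtomicBoolean;

public class TwoPhaseExpireSketch {
    private final AtomicBoolean firstRunDone = new AtomicBoolean(false);

    void runJob() {
        // compareAndSet() guarantees exactly one caller sees the "first run" branch.
        if (firstRunDone.compareAndSet(false, true)) {
            // Phase 1: stop advertising the tunnel (remove from pool, refresh leaseset)
            // and reschedule this job for the drop time.
            System.out.println("phase 1: remove from pool, refresh leaseset, requeue");
        } else {
            // Phase 2: the grace period is over; stop accepting data for the tunnel.
            System.out.println("phase 2: drop the tunnel");
        }
    }

    public static void main(String[] args) {
        TwoPhaseExpireSketch job = new TwoPhaseExpireSketch();
        job.runJob(); // phase 1
        job.runJob(); // phase 2
    }
}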

net/i2p/router/tunnel/pool/PooledTunnelCreatorConfig.java

@@ -3,38 +3,30 @@ package net.i2p.router.tunnel.pool;
import java.util.Properties;
import net.i2p.data.Hash;
- import net.i2p.router.Job;
import net.i2p.router.RouterContext;
- import net.i2p.router.TunnelInfo;
import net.i2p.router.tunnel.TunnelCreatorConfig;
- import net.i2p.util.Log;
/**
* Data about a tunnel we created
*/
class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
- private TunnelPool _pool;
- private TestJob _testJob;
- /** Creates a new instance of PooledTunnelCreatorConfig */
+ private final TunnelPool _pool;
- public PooledTunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound) {
- this(ctx, length, isInbound, null);
- }
- public PooledTunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound, Hash destination) {
+ /**
+ * Creates a new instance of PooledTunnelCreatorConfig
+ *
+ * @param destination may be null
+ * @param pool non-null
+ */
+ public PooledTunnelCreatorConfig(RouterContext ctx, int length, boolean isInbound,
+ Hash destination, TunnelPool pool) {
super(ctx, length, isInbound, destination);
- }
- /** calls TestJob */
- @Override
- public void testSuccessful(int ms) {
- if (_testJob != null)
- _testJob.testSuccessful(ms);
- super.testSuccessful(ms);
+ _pool = pool;
}
/** called from TestJob */
public void testJobSuccessful(int ms) {
- super.testSuccessful(ms);
+ testSuccessful(ms);
}
/**
@@ -51,39 +43,17 @@ class PooledTunnelCreatorConfig extends TunnelCreatorConfig {
// Todo: Maybe delay or prevent failing if we are near tunnel build capacity,
// to prevent collapse (loss of all tunnels)
_pool.tunnelFailed(this);
- if (_testJob != null) // just in case...
- _context.jobQueue().removeJob(_testJob);
}
return rv;
}
@Override
public Properties getOptions() {
- if (_pool == null) return null;
return _pool.getSettings().getUnknownOptions();
}
- public void setTunnelPool(TunnelPool pool) {
- if (pool != null) {
- _pool = pool;
- } else {
- Log log = _context.logManager().getLog(getClass());
- log.error("Null tunnel pool?", new Exception("foo"));
- }
- }
- public TunnelPool getTunnelPool() { return _pool; }
- /** @deprecated unused, which makes _testJob unused - why is it here */
- @Deprecated
- void setTestJob(TestJob job) { _testJob = job; }
- /** does nothing, to be deprecated */
- public void setExpireJob(Job job) { /* _expireJob = job; */ }
/**
- * @deprecated Fix memory leaks caused by references if you need to use pairedTunnel
+ * @return non-null
*/
- @Deprecated
- public void setPairedTunnel(TunnelInfo tunnel) { /* _pairedTunnel = tunnel; */}
- // public TunnelInfo getPairedTunnel() { return _pairedTunnel; }
+ public TunnelPool getTunnelPool() { return _pool; }
}

net/i2p/router/tunnel/pool/TunnelPool.java

@@ -661,7 +661,7 @@ public class TunnelPool {
_log.info(toString() + ": building a fallback tunnel (usable: " + usable + " needed: " + quantity + ")");
// runs inline, since its 0hop
- _manager.getExecutor().buildTunnel(this, configureNewTunnel(true));
+ _manager.getExecutor().buildTunnel(configureNewTunnel(true));
return true;
}
return false;
@@ -1172,8 +1172,8 @@
}
PooledTunnelCreatorConfig cfg = new PooledTunnelCreatorConfig(_context, peers.size(),
- settings.isInbound(), settings.getDestination());
- cfg.setTunnelPool(this);
+ settings.isInbound(), settings.getDestination(),
+ this);
// peers list is ordered endpoint first, but cfg.getPeer() is ordered gateway first
for (int i = 0; i < peers.size(); i++) {
int j = peers.size() - 1 - i;
@@ -1204,7 +1204,6 @@
*/
void buildComplete(PooledTunnelCreatorConfig cfg) {
synchronized (_inProgress) { _inProgress.remove(cfg); }
- cfg.setTunnelPool(this);
//_manager.buildComplete(cfg);
}

net/i2p/router/tunnel/pool/TunnelPoolManager.java

@@ -129,7 +129,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
if (_log.shouldLog(Log.ERROR))
_log.error("Want the inbound tunnel for " + destination.toBase32() +
- " but there isn't a pool?");
+ " but there isn't a pool?", new Exception());
return null;
}
@@ -205,7 +205,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
}
if (_log.shouldLog(Log.ERROR))
_log.error("Want the inbound tunnel for " + destination.toBase32() +
- " but there isn't a pool?");
+ " but there isn't a pool?", new Exception());
return null;
}
@@ -561,25 +561,6 @@
_context.router().isHidden() ||
_context.router().getRouterInfo().getAddressCount() <= 0)) {
TunnelPool pool = cfg.getTunnelPool();
- if (pool == null) {
- // never seen this before, do we reallly need to bother
- // trying so hard to find his pool?
- _log.error("How does this not have a pool? " + cfg, new Exception("baf"));
- if (cfg.getDestination() != null) {
- if (cfg.isInbound()) {
- pool = _clientInboundPools.get(cfg.getDestination());
- } else {
- pool = _clientOutboundPools.get(cfg.getDestination());
- }
- } else {
- if (cfg.isInbound()) {
- pool = _inboundExploratory;
- } else {
- pool = _outboundExploratory;
- }
- }
- cfg.setTunnelPool(pool);
- }
_context.jobQueue().addJob(new TestJob(_context, cfg, pool));
}
}