forked from I2P_Developers/i2p.i2p
* TunnelPoolManager: Reduce race window for restarting a tunnel pool
@@ -1,3 +1,8 @@
+2011-07-28 zzz
+ * Context: Split up big lock to avoid deadlocks
+ * Streaming: Avoid a rare exception on race
+ * TunnelPoolManager: Reduce race window for restarting a tunnel pool
+
 2011-07-27 kytv
  * Add armel (armv5tejl) wrapper. Compiled and tested in Debian Squeeze.
 
@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 15;
+    public final static long BUILD = 16;
 
     /** for example "-test" */
     public final static String EXTRA = "";
@@ -33,7 +33,7 @@ public class TunnelPool {
     private final List<TunnelInfo> _tunnels;
     private final TunnelPeerSelector _peerSelector;
     private final TunnelPoolManager _manager;
-    private boolean _alive;
+    private volatile boolean _alive;
     private long _lifetimeProcessed;
     private TunnelInfo _lastSelected;
     private long _lastSelectionPeriod;
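The volatile change matters because _alive is written by startup()/shutdown() on one thread and read by other jobs; without volatile, a reader may keep seeing a stale value. A minimal standalone sketch of the visibility issue, using illustrative names rather than the I2P classes:

    // Illustrative only: why a cross-thread "alive" flag should be volatile.
    public class VolatileFlagSketch {
        // Without "volatile", the reader thread may never observe the write below.
        private static volatile boolean alive = true;

        public static void main(String[] args) throws InterruptedException {
            Thread reader = new Thread(() -> {
                while (alive) {
                    // busy-wait until another thread clears the flag
                }
                System.out.println("observed shutdown");
            });
            reader.start();
            Thread.sleep(100);
            alive = false;          // "shutdown" performed by the main thread
            reader.join(1000);      // with volatile this returns promptly
        }
    }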
@@ -64,7 +64,8 @@ public class TunnelPool {
      * Warning, this may be called more than once
      * (without an intervening shutdown()) if the
      * tunnel is stopped and then restarted by the client manager with the same
-     * Destination (i.e. for servers or clients w/ persistent key)
+     * Destination (i.e. for servers or clients w/ persistent key,
+     * or restarting close-on-idle clients)
      */
     void startup() {
         synchronized (_inProgress) {
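The updated javadoc notes that startup() can run again without an intervening shutdown() when a close-on-idle client restarts. A hedged sketch of that general pattern, not the actual TunnelPool body: make startup idempotent and serialize it on a private lock so a repeat call does no extra work. (Names here are illustrative; the real code synchronizes on _inProgress.)

    // Sketch of a re-entrant, idempotent startup guarded by a private lock object.
    public class RestartablePool {
        private final Object startLock = new Object();
        private volatile boolean alive;

        void startup() {
            synchronized (startLock) {
                if (alive)
                    return;           // already started, e.g. client restarted quickly
                alive = true;
                // ... schedule build jobs here ...
            }
        }

        void shutdown() {
            synchronized (startLock) {
                alive = false;
                // ... cancel jobs, drop tunnels ...
            }
        }
    }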
@@ -362,6 +363,7 @@ public class TunnelPool {
 
         if (getTunnelCount() <= 0 && !isAlive()) {
             // this calls both our shutdown() and the other one (inbound/outbound)
+            // This is racy - see TunnelPoolManager
             _manager.removeTunnels(_settings.getDestination());
         }
     }
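The new comment flags a check-then-act race: between evaluating getTunnelCount() <= 0 && !isAlive() and calling _manager.removeTunnels(), another thread can restart the same Destination, so the removal can tear down a pool that has just come back to life. The mitigation lands in TunnelPoolManager below; an illustrative, runnable reduction of the race using a plain map rather than the I2P classes:

    import java.util.concurrent.ConcurrentHashMap;

    // Illustrative check-then-act race on a pool map (not I2P code): the check
    // and the remove are not atomic, so a restart that re-registers the key
    // between them still loses its entry.
    public class CheckThenActRace {
        static final ConcurrentHashMap<String, Object> pools = new ConcurrentHashMap<>();

        public static void main(String[] args) throws InterruptedException {
            pools.put("dest", new Object());
            Thread expirer = new Thread(() -> {
                if (pools.get("dest") != null) {     // check
                    sleepQuietly(50);                // window where a restart can happen
                    pools.remove("dest");            // act: removes the *new* pool too
                }
            });
            Thread restarter = new Thread(() -> {
                sleepQuietly(10);
                pools.put("dest", new Object());     // client restarted with same key
            });
            expirer.start();
            restarter.start();
            expirer.join();
            restarter.join();
            System.out.println("pool present after restart? " + pools.containsKey("dest"));
        }

        static void sleepQuietly(long ms) {
            try { Thread.sleep(ms); } catch (InterruptedException ignored) {}
        }
    }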
@@ -286,6 +286,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
         // should we share the clientPeerSelector across both inbound and outbound?
         // or just one for all clients? why separate?
 
+        boolean delayOutbound = false;
         // synch with removeTunnels() below
         synchronized (this) {
             inbound = _clientInboundPools.get(dest);
@@ -301,12 +302,18 @@ public class TunnelPoolManager implements TunnelManagerFacade {
                 outbound = new TunnelPool(_context, this, settings.getOutboundSettings(),
                                           new ClientPeerSelector());
                 _clientOutboundPools.put(dest, outbound);
+                delayOutbound = true;
             } else {
                 outbound.setSettings(settings.getOutboundSettings());
             }
         }
         inbound.startup();
-        SimpleScheduler.getInstance().addEvent(new DelayedStartup(outbound), 3*1000);
+        // don't delay the outbound if it already exists, as this opens up a large
+        // race window with removeTunnels() below
+        if (delayOutbound)
+            SimpleScheduler.getInstance().addEvent(new DelayedStartup(outbound), 1000);
+        else
+            outbound.startup();
     }
 
 
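The reworked path only schedules a delayed startup for an outbound pool created in this call; a pre-existing outbound pool is started immediately, because the old unconditional 3-second delay opened a window in which removeTunnels() could run against a pool that was about to be restarted. A hedged sketch of the same shape, using a stand-in ScheduledExecutorService instead of I2P's SimpleScheduler and DelayedStartup:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Illustrative version of the "delay only newly created pools" pattern.
    public class DelayedStartupSketch {
        private static final ScheduledExecutorService timer =
                Executors.newSingleThreadScheduledExecutor();

        static void start(Runnable poolStartup, boolean newlyCreated) {
            if (newlyCreated) {
                // a brand-new outbound pool can wait a moment for the inbound side
                timer.schedule(poolStartup, 1, TimeUnit.SECONDS);
            } else {
                // an existing pool restarts immediately, keeping the window
                // against a concurrent removal as small as possible
                poolStartup.run();
            }
        }

        public static void main(String[] args) throws InterruptedException {
            start(() -> System.out.println("existing pool: started immediately"), false);
            start(() -> System.out.println("new pool: started after delay"), true);
            Thread.sleep(1500);
            timer.shutdown();
        }
    }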
@@ -331,8 +338,10 @@ public class TunnelPoolManager implements TunnelManagerFacade {
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Removing tunnels for the client " + destination);
         if (_context.clientManager().isLocal(destination)) {
-            if (_log.shouldLog(Log.CRIT))
-                _log.log(Log.CRIT, "wtf, why are you removing the pool for " + destination.toBase64(), new Exception("i did it"));
+            // race with buildTunnels() on restart of a client
+            if (_log.shouldLog(Log.ERROR))
+                _log.error("Not removing pool still registered with client manager: " + destination.toBase64(), new Exception("i did it"));
+            return;
         }
         TunnelPool inbound = _clientInboundPools.remove(destination);
         TunnelPool outbound = _clientOutboundPools.remove(destination);
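Instead of logging at CRIT and removing the pools anyway, removeTunnels() now refuses to act while the Destination is still registered with the client manager: it logs at ERROR and returns early, leaving the restart path in buildTunnels() to reuse the existing pools. A minimal sketch of that guard style, with illustrative names standing in for clientManager().isLocal() and the pool maps:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Predicate;

    // Illustrative guard: refuse to tear down pools for a destination that is
    // still registered, since a concurrent restart will want to reuse them.
    public class RemoveGuardSketch {
        private final Map<String, Object> inboundPools = new ConcurrentHashMap<>();
        private final Map<String, Object> outboundPools = new ConcurrentHashMap<>();
        private final Predicate<String> stillRegistered;   // stand-in for clientManager().isLocal()

        RemoveGuardSketch(Predicate<String> stillRegistered) {
            this.stillRegistered = stillRegistered;
        }

        void removeTunnels(String dest) {
            if (stillRegistered.test(dest)) {
                System.err.println("Not removing pool still registered with client manager: " + dest);
                return;   // the restart path will reuse the existing pools
            }
            inboundPools.remove(dest);
            outboundPools.remove(dest);
        }
    }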