From df4143f036a08c67600ef54e1ac511ba98b8b101 Mon Sep 17 00:00:00 2001 From: zzz Date: Tue, 25 Aug 2009 13:12:24 +0000 Subject: [PATCH] * NetDb: - oops, store leaseset locally even when shutting down (fix -16) - Java 5 cleanups * PRNG: - Rename config option to prng.buffers (was router.prng.buffers) - Change the default from 16 to 2 for I2PAppContext (saves 3.5MB) * TunnelPool: - Don't test tunnels when shutting down - Less rates - Java 5 cleanups --- android/res/raw/router_config | 2 +- .../crypto/prng/AsyncFortunaStandalone.java | 8 ++- .../src/net/i2p/client/I2PSessionImpl2.java | 2 + router/java/src/net/i2p/router/Router.java | 1 + .../src/net/i2p/router/RouterContext.java | 11 +++- .../KademliaNetworkDatabaseFacade.java | 18 ++--- .../net/i2p/router/tunnel/pool/TestJob.java | 17 +++-- .../router/tunnel/pool/TunnelPoolManager.java | 66 ++++++++++--------- 8 files changed, 75 insertions(+), 50 deletions(-) diff --git a/android/res/raw/router_config b/android/res/raw/router_config index cf63ed56a..3cc52bd63 100644 --- a/android/res/raw/router_config +++ b/android/res/raw/router_config @@ -3,7 +3,7 @@ i2p.dir.temp=/data/data/net.i2p.router/files/tmp i2p.dir.pid=/data/data/net.i2p.router/files/tmp # save memory -router.prng.buffers=2 +prng.buffers=2 router.decayingBloomFilterM=20 stat.full=false i2np.udp.maxConnections=30 diff --git a/core/java/src/gnu/crypto/prng/AsyncFortunaStandalone.java b/core/java/src/gnu/crypto/prng/AsyncFortunaStandalone.java index 0b1f88765..c37ada8bc 100644 --- a/core/java/src/gnu/crypto/prng/AsyncFortunaStandalone.java +++ b/core/java/src/gnu/crypto/prng/AsyncFortunaStandalone.java @@ -12,7 +12,11 @@ import net.i2p.util.Log; * has been eaten) */ public class AsyncFortunaStandalone extends FortunaStandalone implements Runnable { - private static final int DEFAULT_BUFFERS = 16; + /** + * This is set to 2 to minimize memory usage for standalone apps. + * The router must override this via the prng.buffers property in the router context. + */ + private static final int DEFAULT_BUFFERS = 2; private static final int BUFSIZE = 256*1024; private int _bufferCount; private final byte asyncBuffers[][]; @@ -28,7 +32,7 @@ public class AsyncFortunaStandalone extends FortunaStandalone implements Runnabl public AsyncFortunaStandalone(I2PAppContext context) { super(); - _bufferCount = context.getProperty("router.prng.buffers", DEFAULT_BUFFERS); + _bufferCount = Math.max(context.getProperty("prng.buffers", DEFAULT_BUFFERS), 2); asyncBuffers = new byte[_bufferCount][BUFSIZE]; status = new int[_bufferCount]; for (int i = 0; i < _bufferCount; i++) diff --git a/core/java/src/net/i2p/client/I2PSessionImpl2.java b/core/java/src/net/i2p/client/I2PSessionImpl2.java index 508057c2c..981ccfae2 100644 --- a/core/java/src/net/i2p/client/I2PSessionImpl2.java +++ b/core/java/src/net/i2p/client/I2PSessionImpl2.java @@ -91,6 +91,8 @@ class I2PSessionImpl2 extends I2PSessionImpl { * Perhaps the http server (which does its own compression) * and P2P apps (with generally uncompressible data) should * set to false. + * + * Todo: don't compress if destination is local? 
*/ private static final int DONT_COMPRESS_SIZE = 66; protected boolean shouldCompress(int size) { diff --git a/router/java/src/net/i2p/router/Router.java b/router/java/src/net/i2p/router/Router.java index 1b20bda9d..6e52f2bc5 100644 --- a/router/java/src/net/i2p/router/Router.java +++ b/router/java/src/net/i2p/router/Router.java @@ -330,6 +330,7 @@ public class Router { _context.blocklist().startup(); // let the timestamper get us sync'ed + // this will block for quite a while on a disconnected machine long before = System.currentTimeMillis(); _context.clock().getTimestamper().waitForInitialization(); long waited = System.currentTimeMillis() - before; diff --git a/router/java/src/net/i2p/router/RouterContext.java b/router/java/src/net/i2p/router/RouterContext.java index 467338eb2..2a3410348 100644 --- a/router/java/src/net/i2p/router/RouterContext.java +++ b/router/java/src/net/i2p/router/RouterContext.java @@ -75,19 +75,28 @@ public class RouterContext extends I2PAppContext { //initAll(); _contexts.add(this); } + /** + * Set properties where the defaults must be different from those + * in I2PAppContext. + * * Unless we are explicitly disabling the timestamper, we want to use it. * We need this now as the new timestamper default is disabled (so we don't * have each I2PAppContext creating their own SNTP queries all the time) * + * Set more PRNG buffers, as the default is now small for the I2PAppContext. + * */ - static final Properties filterProps(Properties envProps) { + private static final Properties filterProps(Properties envProps) { if (envProps == null) envProps = new Properties(); if (envProps.getProperty("time.disabled") == null) envProps.setProperty("time.disabled", "false"); + if (envProps.getProperty("prng.buffers") == null) + envProps.setProperty("prng.buffers", "16"); return envProps; } + public void initAll() { if ("false".equals(getProperty("i2p.dummyClientFacade", "false"))) _clientManagerFacade = new ClientManagerFacadeImpl(this); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java index ff53f7d15..aafd45e7e 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java @@ -56,7 +56,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { private DataStore _ds; // hash to DataStructure mapping, persisted when necessary /** where the data store is pushing the data */ private String _dbDir; - private final Set _exploreKeys = new HashSet(64); // set of Hash objects that we should search on (to fill up a bucket, not to get data) + private final Set<Hash> _exploreKeys = new HashSet(64); // set of Hash objects that we should search on (to fill up a bucket, not to get data) private boolean _initialized; /** Clock independent time of when we started up */ private long _started; @@ -72,7 +72,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { * removed when the job decides to stop running. 
* */ - private final Map _publishingLeaseSets; + private final Map<Hash, RepublishLeaseSetJob> _publishingLeaseSets; /** * Hash of the key currently being searched for, pointing the SearchJob that * is currently operating. Subsequent requests for that same key are simply * added on to the list of jobs fired on success/failure * */ - private final Map _activeRequests; + private final Map<Hash, SearchJob> _activeRequests; /** * The search for the given key is no longer active * */ @@ -160,7 +160,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { _exploreJob.updateExploreSchedule(); } - public Set getExploreKeys() { + public Set<Hash> getExploreKeys() { if (!_initialized) return null; synchronized (_exploreKeys) { return new HashSet(_exploreKeys); @@ -302,12 +302,12 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { /** * Get the routers closest to that key in response to a remote lookup */ - public Set findNearestRouters(Hash key, int maxNumRouters, Set peersToIgnore) { + public Set<RouterInfo> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore) { if (!_initialized) return null; return getRouters(_peerSelector.selectNearest(key, maxNumRouters, peersToIgnore, _kb)); } - private Set getRouters(Collection hashes) { + private Set<RouterInfo> getRouters(Collection<Hash> hashes) { if (!_initialized) return null; Set rv = new HashSet(hashes.size()); for (Iterator iter = hashes.iterator(); iter.hasNext(); ) { @@ -481,8 +481,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { private static final long PUBLISH_DELAY = 3*1000; public void publish(LeaseSet localLeaseSet) { if (!_initialized) return; - if (_context.router().gracefulShutdownInProgress()) - return; Hash h = localLeaseSet.getDestination().calculateHash(); try { store(h, localLeaseSet); @@ -492,6 +490,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } if (!_context.clientManager().shouldPublishLeaseSet(h)) return; + if (_context.router().gracefulShutdownInProgress()) + return; RepublishLeaseSetJob j = null; synchronized (_publishingLeaseSets) { @@ -855,7 +855,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade { } return leases; } - private Set getRouters() { + private Set<RouterInfo> getRouters() { if (!_initialized) return null; Set routers = new HashSet(); Set keys = getDataStore().getKeys(); diff --git a/router/java/src/net/i2p/router/tunnel/pool/TestJob.java b/router/java/src/net/i2p/router/tunnel/pool/TestJob.java index c0633ef9f..fc79c87f2 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/TestJob.java +++ b/router/java/src/net/i2p/router/tunnel/pool/TestJob.java @@ -31,6 +31,7 @@ class TestJob extends JobImpl { /** base to randomize the test delay on */ private static final int TEST_DELAY = 30*1000; + private static final long[] RATES = { 60*1000, 10*60*1000l, 60*60*1000l }; public TestJob(RouterContext ctx, PooledTunnelCreatorConfig cfg, TunnelPool pool) { super(ctx); @@ -43,19 +44,19 @@ class TestJob extends JobImpl { _log.error("Invalid tunnel test configuration: no pool for " + cfg, new Exception("origin")); getTiming().setStartAfter(getDelay() + ctx.clock().now()); ctx.statManager().createRateStat("tunnel.testFailedTime", "How long did the failure take (max of 60s for full timeout)?", "Tunnels", - new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); + RATES); ctx.statManager().createRateStat("tunnel.testExploratoryFailedTime", "How long did the failure of an exploratory tunnel take (max of 60s for full timeout)?", 
"Tunnels", - new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); + RATES); ctx.statManager().createRateStat("tunnel.testFailedCompletelyTime", "How long did the complete failure take (max of 60s for full timeout)?", "Tunnels", - new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); + RATES); ctx.statManager().createRateStat("tunnel.testExploratoryFailedCompletelyTime", "How long did the complete failure of an exploratory tunnel take (max of 60s for full timeout)?", "Tunnels", - new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); + RATES); ctx.statManager().createRateStat("tunnel.testSuccessLength", "How long were the tunnels that passed the test?", "Tunnels", - new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); + RATES); ctx.statManager().createRateStat("tunnel.testSuccessTime", "How long did tunnel testing take?", "Tunnels", - new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); + RATES); ctx.statManager().createRateStat("tunnel.testAborted", "Tunnel test could not occur, since there weren't any tunnels to test with", "Tunnels", - new long[] { 60*1000, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); + RATES); } public String getName() { return "Test tunnel"; } public void runJob() { @@ -69,6 +70,8 @@ class TestJob extends JobImpl { scheduleRetest(); return; } + if (getContext().router().gracefulShutdownInProgress()) + return; // don't reschedule _found = false; // note: testing with exploratory tunnels always, even if the tested tunnel // is a client tunnel (per _cfg.getDestination()) diff --git a/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java b/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java index 6bd7ba646..a83b78aaa 100644 --- a/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java +++ b/router/java/src/net/i2p/router/tunnel/pool/TunnelPoolManager.java @@ -37,9 +37,9 @@ public class TunnelPoolManager implements TunnelManagerFacade { private RouterContext _context; private Log _log; /** Hash (destination) to TunnelPool */ - private final Map _clientInboundPools; + private final Map _clientInboundPools; /** Hash (destination) to TunnelPool */ - private final Map _clientOutboundPools; + private final Map _clientOutboundPools; private TunnelPool _inboundExploratory; private TunnelPool _outboundExploratory; private BuildExecutor _executor; @@ -90,7 +90,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { if (destination == null) return selectInboundTunnel(); TunnelPool pool = null; synchronized (_clientInboundPools) { - pool = (TunnelPool)_clientInboundPools.get(destination); + pool = _clientInboundPools.get(destination); } if (pool != null) { return pool.selectTunnel(); @@ -119,7 +119,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { if (destination == null) return selectOutboundTunnel(); TunnelPool pool = null; synchronized (_clientOutboundPools) { - pool = (TunnelPool)_clientOutboundPools.get(destination); + pool = _clientOutboundPools.get(destination); } if (pool != null) { return pool.selectTunnel(); @@ -130,8 +130,8 @@ public class TunnelPoolManager implements TunnelManagerFacade { public TunnelInfo getTunnelInfo(TunnelId id) { TunnelInfo info = null; synchronized (_clientInboundPools) { - for (Iterator iter = _clientInboundPools.values().iterator(); iter.hasNext(); ) { - TunnelPool pool = (TunnelPool)iter.next(); + for (Iterator iter = 
_clientInboundPools.values().iterator(); iter.hasNext(); ) { + TunnelPool pool = iter.next(); info = pool.getTunnel(id); if (info != null) return info; @@ -166,7 +166,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { Hash client = (Hash)destinations.get(i); TunnelPool pool = null; synchronized (_clientInboundPools) { - pool = (TunnelPool)_clientInboundPools.get(client); + pool = _clientInboundPools.get(client); } count += pool.listTunnels().size(); } @@ -182,7 +182,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { Hash client = (Hash)destinations.get(i); TunnelPool pool = null; synchronized (_clientOutboundPools) { - pool = (TunnelPool)_clientOutboundPools.get(client); + pool = _clientOutboundPools.get(client); } count += pool.listTunnels().size(); } @@ -196,9 +196,9 @@ public class TunnelPoolManager implements TunnelManagerFacade { return false; TunnelPool pool; if (tunnel.isInbound()) - pool = (TunnelPool)_clientInboundPools.get(client); + pool = _clientInboundPools.get(client); else - pool = (TunnelPool)_clientOutboundPools.get(client); + pool = _clientOutboundPools.get(client); if (pool == null) return false; return pool.listTunnels().contains(tunnel); @@ -211,7 +211,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { public TunnelPoolSettings getInboundSettings(Hash client) { TunnelPool pool = null; synchronized (_clientInboundPools) { - pool = (TunnelPool)_clientInboundPools.get(client); + pool = _clientInboundPools.get(client); } if (pool != null) return pool.getSettings(); @@ -221,7 +221,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { public TunnelPoolSettings getOutboundSettings(Hash client) { TunnelPool pool = null; synchronized (_clientOutboundPools) { - pool = (TunnelPool)_clientOutboundPools.get(client); + pool = _clientOutboundPools.get(client); } if (pool != null) return pool.getSettings(); @@ -234,10 +234,10 @@ public class TunnelPoolManager implements TunnelManagerFacade { public void setOutboundSettings(Hash client, TunnelPoolSettings settings) { setSettings(_clientOutboundPools, client, settings); } - private void setSettings(Map pools, Hash client, TunnelPoolSettings settings) { + private void setSettings(Map<Hash, TunnelPool> pools, Hash client, TunnelPoolSettings settings) { TunnelPool pool = null; synchronized (pools) { - pool = (TunnelPool)pools.get(client); + pool = pools.get(client); } if (pool != null) { settings.setDestination(client); // prevent spoofing or unset dest @@ -260,7 +260,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { TunnelPool outbound = null; // should we share the clientPeerSelector across both inbound and outbound? 
synchronized (_clientInboundPools) { - inbound = (TunnelPool)_clientInboundPools.get(dest); + inbound = _clientInboundPools.get(dest); if (inbound == null) { inbound = new TunnelPool(_context, this, settings.getInboundSettings(), new ClientPeerSelector()); @@ -270,7 +270,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { } } synchronized (_clientOutboundPools) { - outbound = (TunnelPool)_clientOutboundPools.get(dest); + outbound = _clientOutboundPools.get(dest); if (outbound == null) { outbound = new TunnelPool(_context, this, settings.getOutboundSettings(), new ClientPeerSelector()); @@ -294,10 +294,10 @@ public class TunnelPoolManager implements TunnelManagerFacade { TunnelPool inbound = null; TunnelPool outbound = null; synchronized (_clientInboundPools) { - inbound = (TunnelPool)_clientInboundPools.remove(destination); + inbound = _clientInboundPools.remove(destination); } synchronized (_clientOutboundPools) { - outbound = (TunnelPool)_clientOutboundPools.remove(destination); + outbound = _clientOutboundPools.remove(destination); } if (inbound != null) inbound.shutdown(); @@ -305,20 +305,24 @@ public class TunnelPoolManager implements TunnelManagerFacade { outbound.shutdown(); } + /** queue a recurring test job if appropriate */ void buildComplete(PooledTunnelCreatorConfig cfg) { - buildComplete(); - if (cfg.getLength() > 1) { + //buildComplete(); + if (cfg.getLength() > 1 && + !_context.router().gracefulShutdownInProgress()) { TunnelPool pool = cfg.getTunnelPool(); if (pool == null) { + // never seen this before, do we really need to bother + // trying so hard to find its pool? _log.error("How does this not have a pool? " + cfg, new Exception("baf")); if (cfg.getDestination() != null) { if (cfg.isInbound()) { synchronized (_clientInboundPools) { - pool = (TunnelPool)_clientInboundPools.get(cfg.getDestination()); + pool = _clientInboundPools.get(cfg.getDestination()); } } else { synchronized (_clientOutboundPools) { - pool = (TunnelPool)_clientOutboundPools.get(cfg.getDestination()); + pool = _clientOutboundPools.get(cfg.getDestination()); } } } else { @@ -333,6 +337,8 @@ public class TunnelPoolManager implements TunnelManagerFacade { _context.jobQueue().addJob(new TestJob(_context, cfg, pool)); } } + + /** ?? */ void buildComplete() {} public void startup() { @@ -384,7 +390,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { } /** list of TunnelPool instances currently in play */ - void listPools(List out) { + void listPools(List<TunnelPool> out) { synchronized (_clientInboundPools) { out.addAll(_clientInboundPools.values()); } @@ -407,19 +413,19 @@ public class TunnelPoolManager implements TunnelManagerFacade { out.write("Exploratory tunnels (config):
\n"); renderPool(out, _inboundExploratory, _outboundExploratory); - List destinations = null; + List destinations = null; synchronized (_clientInboundPools) { destinations = new ArrayList(_clientInboundPools.keySet()); } for (int i = 0; i < destinations.size(); i++) { - Hash client = (Hash)destinations.get(i); + Hash client = destinations.get(i); TunnelPool in = null; TunnelPool outPool = null; synchronized (_clientInboundPools) { - in = (TunnelPool)_clientInboundPools.get(client); + in = _clientInboundPools.get(client); } synchronized (_clientOutboundPools) { - outPool = (TunnelPool)_clientOutboundPools.get(client); + outPool = _clientOutboundPools.get(client); } String name = (in != null ? in.getSettings().getDestinationNickname() : null); if ( (name == null) && (outPool != null) ) @@ -505,7 +511,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { } private void renderPool(Writer out, TunnelPool in, TunnelPool outPool) throws IOException { - List tunnels = null; + List tunnels = null; if (in == null) tunnels = new ArrayList(); else @@ -519,7 +525,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { int live = 0; int maxLength = 1; for (int i = 0; i < tunnels.size(); i++) { - TunnelInfo info = (TunnelInfo)tunnels.get(i); + TunnelInfo info = tunnels.get(i); if (info.getLength() > maxLength) maxLength = info.getLength(); } @@ -536,7 +542,7 @@ public class TunnelPoolManager implements TunnelManagerFacade { } out.write("\n"); for (int i = 0; i < tunnels.size(); i++) { - TunnelInfo info = (TunnelInfo)tunnels.get(i); + TunnelInfo info = tunnels.get(i); long timeLeft = info.getExpiration()-_context.clock().now(); if (timeLeft <= 0) continue; // don't display tunnels in their grace period