#1069: Deprecated SimpleScheduler and moved functionality into SimpleTimer2
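The change is mechanical at each call site: tasks that were scheduled through the deprecated SimpleScheduler now go through the equivalent addEvent()/addPeriodicEvent() methods on SimpleTimer2 (via _context.simpleTimer2() inside the router, or SimpleTimer2.getInstance() elsewhere), and the now-unused SimpleScheduler imports are dropped. A minimal sketch of the pattern follows; the Example class, Cleaner event, and CLEAN_TIME interval are illustrative stand-ins for the various per-file events and intervals, while the SimpleTimer.TimedEvent callback interface and the scheduling calls are the ones that appear in the hunks below.

```java
// Sketch of the migration pattern applied throughout this commit.
// "Example", "Cleaner", and CLEAN_TIME are placeholders; the scheduling
// calls and the SimpleTimer.TimedEvent interface are the real API.
import net.i2p.util.SimpleTimer;
import net.i2p.util.SimpleTimer2;

class Example {
    private static final long CLEAN_TIME = 60*1000;

    private static class Cleaner implements SimpleTimer.TimedEvent {
        public void timeReached() {
            // periodic cleanup work goes here
        }
    }

    Example() {
        // Before (deprecated):
        //     SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
        // After:
        SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
        // Inside the router the context accessor changes the same way:
        //     _context.simpleScheduler().addEvent(event, delay)
        // becomes
        //     _context.simpleTimer2().addEvent(event, delay)
    }
}
```

Since SimpleScheduler is deprecated rather than removed, existing out-of-tree callers should keep compiling, with a deprecation warning.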
@@ -576,7 +576,7 @@ public class Router implements RouterClock.ClockShiftListener {
 _context.inNetMessagePool().startup();
 startupQueue();
 //_context.jobQueue().addJob(new CoalesceStatsJob(_context));
-_context.simpleScheduler().addPeriodicEvent(new CoalesceStatsEvent(_context), COALESCE_TIME);
+_context.simpleTimer2().addPeriodicEvent(new CoalesceStatsEvent(_context), COALESCE_TIME);
 _context.jobQueue().addJob(new UpdateRoutingKeyModifierJob(_context));
 //_context.adminManager().startup();
 _context.blocklist().startup();
@@ -840,7 +840,7 @@ public class Router implements RouterClock.ClockShiftListener {
 if (blockingRebuild)
 r.timeReached();
 else
-_context.simpleScheduler().addEvent(r, 0);
+_context.simpleTimer2().addEvent(r, 0);
 } catch (DataFormatException dfe) {
 _log.log(Log.CRIT, "Internal error - unable to sign our own address?!", dfe);
 }
@@ -1747,7 +1747,7 @@ public class Router implements RouterClock.ClockShiftListener {
 */
 private void beginMarkingLiveliness() {
 File f = getPingFile();
-_context.simpleScheduler().addPeriodicEvent(new MarkLiveliness(this, f), 0, LIVELINESS_DELAY - (5*1000));
+_context.simpleTimer2().addPeriodicEvent(new MarkLiveliness(this, f), 0, LIVELINESS_DELAY - (5*1000));
 }
 
 public static final String PROP_BANDWIDTH_SHARE_PERCENTAGE = "router.sharePercentage";
@@ -50,7 +50,7 @@ class RouterThrottleImpl implements RouterThrottle {
 _context = context;
 _log = context.logManager().getLog(RouterThrottleImpl.class);
 setTunnelStatus();
-_context.simpleScheduler().addEvent(new ResetStatus(), 5*1000 + _context.getProperty(PROP_REJECT_STARTUP_TIME, DEFAULT_REJECT_STARTUP_TIME));
+_context.simpleTimer2().addEvent(new ResetStatus(), 5*1000 + _context.getProperty(PROP_REJECT_STARTUP_TIME, DEFAULT_REJECT_STARTUP_TIME));
 _context.statManager().createRateStat("router.throttleNetworkCause", "How lagged the jobQueue was when an I2NP was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
 //_context.statManager().createRateStat("router.throttleNetDbCause", "How lagged the jobQueue was when a networkDb request was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
 _context.statManager().createRateStat("router.throttleTunnelCause", "How lagged the jobQueue was when a tunnel request was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
@@ -557,7 +557,7 @@ class ClientConnectionRunner {
 // theirs is newer
 } else {
 // ours is newer, so wait a few secs and retry
-_context.simpleScheduler().addEvent(new Rerequest(set, expirationTime, onCreateJob, onFailedJob), 3*1000);
+_context.simpleTimer2().addEvent(new Rerequest(set, expirationTime, onCreateJob, onFailedJob), 3*1000);
 }
 // fire onCreated?
 return; // already requesting
@@ -183,7 +183,7 @@ public class TransientSessionKeyManager extends SessionKeyManager {
 context.statManager().createRateStat("crypto.sessionTagsExpired", "How many tags/sessions are expired?", "Encryption", new long[] { 10*60*1000, 60*60*1000, 3*60*60*1000 });
 context.statManager().createRateStat("crypto.sessionTagsRemaining", "How many tags/sessions are remaining after a cleanup?", "Encryption", new long[] { 10*60*1000, 60*60*1000, 3*60*60*1000 });
 _alive = true;
-_context.simpleScheduler().addEvent(new CleanupEvent(), 60*1000);
+_context.simpleTimer2().addEvent(new CleanupEvent(), 60*1000);
 }
 
 @Override
@@ -205,7 +205,7 @@ public class TransientSessionKeyManager extends SessionKeyManager {
 int expired = aggressiveExpire();
 long expireTime = _context.clock().now() - beforeExpire;
 _context.statManager().addRateData("crypto.sessionTagsExpired", expired, expireTime);
-_context.simpleScheduler().addEvent(this, 60*1000);
+_context.simpleTimer2().addEvent(this, 60*1000);
 }
 }
 
@@ -101,7 +101,7 @@ public class OutboundCache {
 
 public OutboundCache(RouterContext ctx) {
 _context = ctx;
-_context.simpleScheduler().addPeriodicEvent(new OCMOSJCacheCleaner(), CLEAN_INTERVAL, CLEAN_INTERVAL);
+_context.simpleTimer2().addPeriodicEvent(new OCMOSJCacheCleaner(), CLEAN_INTERVAL, CLEAN_INTERVAL);
 }
 
 /**
@@ -2,8 +2,8 @@ package net.i2p.router.networkdb.kademlia;
 
 import net.i2p.data.Hash;
 import net.i2p.util.ObjectCounter;
-import net.i2p.util.SimpleScheduler;
 import net.i2p.util.SimpleTimer;
+import net.i2p.util.SimpleTimer2;
 
 /**
  * Count how often we have recently flooded a key
@@ -18,7 +18,7 @@ class FloodThrottler {
 
 FloodThrottler() {
 this.counter = new ObjectCounter<Hash>();
-SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
+SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
 }
 
 /** increments before checking */
@@ -3,8 +3,8 @@ package net.i2p.router.networkdb.kademlia;
 import net.i2p.data.Hash;
 import net.i2p.data.TunnelId;
 import net.i2p.util.ObjectCounter;
-import net.i2p.util.SimpleScheduler;
 import net.i2p.util.SimpleTimer;
+import net.i2p.util.SimpleTimer2;
 
 /**
  * Count how often we have recently received a lookup request with
@@ -25,7 +25,7 @@ class LookupThrottler {
 
 LookupThrottler() {
 this.counter = new ObjectCounter<ReplyTunnel>();
-SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
+SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
 }
 
 /**
@@ -1,12 +1,13 @@
 package net.i2p.router.networkdb.kademlia;
 
 import java.util.Map;
 
 import net.i2p.data.Destination;
 import net.i2p.data.Hash;
 import net.i2p.util.LHMCache;
 import net.i2p.util.ObjectCounter;
-import net.i2p.util.SimpleScheduler;
 import net.i2p.util.SimpleTimer;
+import net.i2p.util.SimpleTimer2;
 
 /**
  * Track lookup fails
@@ -24,7 +25,7 @@ class NegativeLookupCache {
 public NegativeLookupCache() {
 this.counter = new ObjectCounter<Hash>();
 this.badDests = new LHMCache<Hash, Destination>(MAX_BAD_DESTS);
-SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
+SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
 }
 
 public void lookupFailed(Hash h) {
@@ -184,7 +184,7 @@ public class ReseedChecker {
 */
 void done() {
 _inProgress.set(false);
-_context.simpleScheduler().addEvent(new StatusCleaner(_lastStatus, _lastError), STATUS_CLEAN_TIME);
+_context.simpleTimer2().addEvent(new StatusCleaner(_lastStatus, _lastError), STATUS_CLEAN_TIME);
 }
 
 /**
@@ -278,7 +278,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
 private static final int LOOKUP_TIME = 30*60*1000;
 
 private void startGeoIP() {
-_context.simpleScheduler().addEvent(new QueueAll(), START_DELAY);
+_context.simpleTimer2().addEvent(new QueueAll(), START_DELAY);
 }
 
 /**
@@ -296,7 +296,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
 continue;
 _geoIP.add(ip);
 }
-_context.simpleScheduler().addPeriodicEvent(new Lookup(), 5000, LOOKUP_TIME);
+_context.simpleTimer2().addPeriodicEvent(new Lookup(), 5000, LOOKUP_TIME);
 }
 }
 
@@ -491,7 +491,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
 
 /** @since 0.7.12 */
 private void startTimestamper() {
-_context.simpleScheduler().addPeriodicEvent(new Timestamper(), TIME_START_DELAY, TIME_REPEAT_DELAY);
+_context.simpleTimer2().addPeriodicEvent(new Timestamper(), TIME_START_DELAY, TIME_REPEAT_DELAY);
 }
 
 /**
@@ -102,7 +102,7 @@ public abstract class TransportImpl implements Transport {
 _unreachableEntries = new HashMap<Hash, Long>(32);
 _wasUnreachableEntries = new HashMap<Hash, Long>(32);
 _localAddresses = new ConcurrentHashSet<InetAddress>(4);
-_context.simpleScheduler().addPeriodicEvent(new CleanupUnreachable(), 2 * UNREACHABLE_PERIOD, UNREACHABLE_PERIOD / 2);
+_context.simpleTimer2().addPeriodicEvent(new CleanupUnreachable(), 2 * UNREACHABLE_PERIOD, UNREACHABLE_PERIOD / 2);
 }
 
 /**
@@ -748,7 +748,7 @@ class EstablishmentManager {
 _transport.send(dsm, peer);
 
 // just do this inline
-//_context.simpleScheduler().addEvent(new PublishToNewInbound(peer), 0);
+//_context.simpleTimer2().addEvent(new PublishToNewInbound(peer), 0);
 
 Hash hash = peer.getRemotePeer();
 if ((hash != null) && (!_context.banlist().isBanlisted(hash)) && (!_transport.isUnreachable(hash))) {
@@ -1,8 +1,8 @@
 package net.i2p.router.transport.udp;
 
 import net.i2p.util.ObjectCounter;
-import net.i2p.util.SimpleScheduler;
 import net.i2p.util.SimpleTimer;
+import net.i2p.util.SimpleTimer2;
 import net.i2p.util.SipHash;
 
 /**
@@ -17,7 +17,7 @@ class IPThrottler {
 public IPThrottler(int max, long time) {
 _max = max;
 _counter = new ObjectCounter<Integer>();
-SimpleScheduler.getInstance().addPeriodicEvent(new Cleaner(), time);
+SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), time);
 }
 
 /**
@@ -207,7 +207,7 @@ class PeerTestManager {
 test.incrementPacketsRelayed();
 sendTestToBob();
 
-_context.simpleScheduler().addEvent(new ContinueTest(test.getNonce()), RESEND_TIMEOUT);
+_context.simpleTimer2().addEvent(new ContinueTest(test.getNonce()), RESEND_TIMEOUT);
 }
 
 private class ContinueTest implements SimpleTimer.TimedEvent {
@@ -246,7 +246,7 @@ class PeerTestManager {
 sendTestToCharlie();
 }
 // retx at 4, 10, 17, 25 elapsed time
-_context.simpleScheduler().addEvent(ContinueTest.this, RESEND_TIMEOUT + (sent*1000));
+_context.simpleTimer2().addEvent(ContinueTest.this, RESEND_TIMEOUT + (sent*1000));
 }
 }
 }
@@ -702,7 +702,7 @@ class PeerTestManager {
 
 if (isNew) {
 _activeTests.put(Long.valueOf(nonce), state);
-_context.simpleScheduler().addEvent(new RemoveTest(nonce), MAX_CHARLIE_LIFETIME);
+_context.simpleTimer2().addEvent(new RemoveTest(nonce), MAX_CHARLIE_LIFETIME);
 }
 
 UDPPacket packet = _packetBuilder.buildPeerTestToBob(bobIP, from.getPort(), aliceIP, alicePort,
@@ -805,7 +805,7 @@ class PeerTestManager {
 
 if (isNew) {
 _activeTests.put(Long.valueOf(nonce), state);
-_context.simpleScheduler().addEvent(new RemoveTest(nonce), MAX_CHARLIE_LIFETIME);
+_context.simpleTimer2().addEvent(new RemoveTest(nonce), MAX_CHARLIE_LIFETIME);
 }
 
 UDPPacket packet = _packetBuilder.buildPeerTestToCharlie(aliceIP, from.getPort(), aliceIntroKey, nonce,
@@ -270,7 +270,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
 //_context.statManager().createRateStat("udp.packetAuthTime", "How long it takes to encrypt and MAC a packet for sending", "udp", RATES);
 //_context.statManager().createRateStat("udp.packetAuthTimeSlow", "How long it takes to encrypt and MAC a packet for sending (when its slow)", "udp", RATES);
 
-_context.simpleScheduler().addPeriodicEvent(new PingIntroducers(), MIN_EXPIRE_TIMEOUT * 3 / 4);
+_context.simpleTimer2().addPeriodicEvent(new PingIntroducers(), MIN_EXPIRE_TIMEOUT * 3 / 4);
 }
 
 /**
@@ -1235,7 +1235,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
 RemoteHostId remote = peer.getRemoteHostId();
 _dropList.add(remote);
 _context.statManager().addRateData("udp.dropPeerDroplist", 1);
-_context.simpleScheduler().addEvent(new RemoveDropList(remote), DROPLIST_PERIOD);
+_context.simpleTimer2().addEvent(new RemoveDropList(remote), DROPLIST_PERIOD);
 }
 markUnreachable(peerHash);
 _context.banlist().banlistRouter(peerHash, "Part of the wrong network, version = " + ((RouterInfo) entry).getVersion());
@@ -84,7 +84,7 @@ class TunnelGatewayPumper implements Runnable {
 // in case another packet came in
 _wantsPumping.remove(gw);
 if (_backlogged.add(gw))
-_context.simpleScheduler().addEvent(new Requeue(gw), REQUEUE_TIME);
+_context.simpleTimer2().addEvent(new Requeue(gw), REQUEUE_TIME);
 }
 gw = null;
 if (_wantsPumping.isEmpty()) {
@@ -41,7 +41,7 @@ class ParticipatingThrottler {
 ParticipatingThrottler(RouterContext ctx) {
 this.context = ctx;
 this.counter = new ObjectCounter<Hash>();
-ctx.simpleScheduler().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
+ctx.simpleTimer2().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
 }
 
 /** increments before checking */
@@ -26,7 +26,7 @@ class RequestThrottler {
 RequestThrottler(RouterContext ctx) {
 this.context = ctx;
 this.counter = new ObjectCounter<Hash>();
-ctx.simpleScheduler().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
+ctx.simpleTimer2().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
 }
 
 /** increments before checking */
@@ -430,7 +430,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
 // don't delay the outbound if it already exists, as this opens up a large
 // race window with removeTunnels() below
 if (delayOutbound)
-_context.simpleScheduler().addEvent(new DelayedStartup(outbound), 1000);
+_context.simpleTimer2().addEvent(new DelayedStartup(outbound), 1000);
 else
 outbound.startup();
 }
@@ -512,7 +512,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
 }
 
 _inboundExploratory.startup();
-_context.simpleScheduler().addEvent(new DelayedStartup(_outboundExploratory), 3*1000);
+_context.simpleTimer2().addEvent(new DelayedStartup(_outboundExploratory), 3*1000);
 
 // try to build up longer tunnels
 _context.jobQueue().addJob(new BootstrapPool(_context, _inboundExploratory));