* NetDB: Prep for leasesets with different expire times
  - Add new I2CP RequestVariableLeaseSetMessage
  - Send RVLSM if the client supports it; handle on the client side; disabled by default for the moment
  - Add LeaseSet.getLatestLeaseDate()
  - Check the latest, not the earliest, date too far in the future in KNDF.validate()
  - Check the latest date too far in the past in KNDF.validate()
  - Only check gateway and tunnel ID for equality in OCMOSJ lease caching, to reduce churn
  - Split up KNDF.validate(RI) for efficiency; no need to check signature, netid, etc. before lookups, only on store
  - Remove enforceNetId config
  - Fix major bug causing newer leasesets to be treated as older, and not stored or published
  - Increase max adjustment time of earliest lease
  - TransientDataStore cleanups
  - RouterInfo and LeaseSet equals() speedups
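With variable expirations, the earliest and latest lease end dates play different roles: the earliest acts as the leaseset's version number when publishing and flooding, while the latest is what freshness checks should compare against. A minimal illustrative sketch of that distinction (not the actual net.i2p.data.LeaseSet code; the class and method names below are made up):

    import java.util.Date;
    import java.util.List;

    class LeaseDatesSketch {
        /** earliest expiration: serves as the leaseset "version number" when publishing/flooding */
        static long earliest(List<Date> leaseEnds) {
            long min = Long.MAX_VALUE;
            for (Date d : leaseEnds)
                min = Math.min(min, d.getTime());
            return min;
        }

        /** latest expiration: what "too old" / "too far in the future" checks should look at */
        static long latest(List<Date> leaseEnds) {
            long max = 0;
            for (Date d : leaseEnds)
                max = Math.max(max, d.getTime());
            return max;
        }
    }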
@@ -18,7 +18,7 @@ public class RouterVersion {
    /** deprecated */
    public final static String ID = "Monotone";
    public final static String VERSION = CoreVersion.VERSION;
    public final static long BUILD = 6;
    public final static long BUILD = 7;

    /** for example "-test" */
    public final static String EXTRA = "";
@@ -10,9 +10,12 @@ package net.i2p.router.client;

import java.util.Date;

import net.i2p.data.Lease;
import net.i2p.data.LeaseSet;
import net.i2p.data.i2cp.I2CPMessage;
import net.i2p.data.i2cp.I2CPMessageException;
import net.i2p.data.i2cp.RequestLeaseSetMessage;
import net.i2p.data.i2cp.RequestVariableLeaseSetMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
@@ -31,6 +34,12 @@ class RequestLeaseSetJob extends JobImpl {
    private final ClientConnectionRunner _runner;
    private final LeaseRequestState _requestState;

    private static final long MAX_FUDGE = 2*1000;

    /** temp for testing */
    private static final String PROP_VARIABLE = "router.variableLeaseExpiration";
    private static final boolean DFLT_VARIABLE = false;

    public RequestLeaseSetJob(RouterContext ctx, ClientConnectionRunner runner, LeaseRequestState state) {
        super(ctx);
        _log = ctx.logManager().getLog(RequestLeaseSetJob.class);
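The variable-expiration path is gated on the router property above and is off by default. Presumably it can be enabled for testing with a line like the following in the router configuration (the property name comes from PROP_VARIABLE above; treating router.config as the place to set it is an assumption):

    router.variableLeaseExpiration=true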
@@ -44,22 +53,51 @@ class RequestLeaseSetJob extends JobImpl {
    public void runJob() {
        if (_runner.isDead()) return;

        RequestLeaseSetMessage msg = new RequestLeaseSetMessage();
        long endTime = _requestState.getRequested().getEarliestLeaseDate();
        // Add a small number of ms (0-300) that increases as we approach the expire time.
        LeaseSet requested = _requestState.getRequested();
        long endTime = requested.getEarliestLeaseDate();
        // Add a small number of ms (0 to MAX_FUDGE) that increases as we approach the expire time.
        // Since the earliest date functions as a version number,
        // this will force the floodfill to flood each new version;
        // otherwise it won't if the earliest time hasn't changed.
        long fudge = 300 - ((endTime - getContext().clock().now()) / 2000);
        long fudge = MAX_FUDGE - ((endTime - getContext().clock().now()) / (10*60*1000 / MAX_FUDGE));
        //if (_log.shouldLog(Log.DEBUG))
        //    _log.debug("Adding fudge " + fudge);
        endTime += fudge;
        Date end = new Date(endTime);

        msg.setEndDate(end);
        msg.setSessionId(_runner.getSessionId());

        for (int i = 0; i < _requestState.getRequested().getLeaseCount(); i++) {
            msg.addEndpoint(_requestState.getRequested().getLease(i).getGateway(),
                            _requestState.getRequested().getLease(i).getTunnelId());
        I2CPMessage msg;
        if (getContext().getProperty(PROP_VARIABLE, DFLT_VARIABLE) &&
            (_runner instanceof QueuedClientConnectionRunner ||
             RequestVariableLeaseSetMessage.isSupported(_runner.getClientVersion()))) {
            // new style - leases will have individual expirations
            RequestVariableLeaseSetMessage rmsg = new RequestVariableLeaseSetMessage();
            rmsg.setSessionId(_runner.getSessionId());
            for (int i = 0; i < requested.getLeaseCount(); i++) {
                Lease lease = requested.getLease(i);
                if (lease.getEndDate().getTime() < endTime) {
                    // don't modify old object, we don't know where it came from
                    Lease nl = new Lease();
                    nl.setGateway(lease.getGateway());
                    nl.setTunnelId(lease.getTunnelId());
                    nl.setEndDate(new Date(endTime));
                    lease = nl;
                    //if (_log.shouldLog(Log.INFO))
                    //    _log.info("Adjusted end date to " + endTime + " for " + lease);
                }
                rmsg.addEndpoint(lease);
            }
            msg = rmsg;
        } else {
            // old style - all leases will have same expiration
            RequestLeaseSetMessage rmsg = new RequestLeaseSetMessage();
            Date end = new Date(endTime);
            rmsg.setEndDate(end);
            rmsg.setSessionId(_runner.getSessionId());
            for (int i = 0; i < requested.getLeaseCount(); i++) {
                Lease lease = requested.getLease(i);
                rmsg.addEndpoint(lease.getGateway(),
                                 lease.getTunnelId());
            }
            msg = rmsg;
        }

        try {
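Worked check of the new fudge formula, under the assumption of MAX_FUDGE = 2000 ms and leases that expire roughly 10 minutes ahead: the divisor 10*60*1000 / MAX_FUDGE is 300, so the adjustment is about 2000 - remaining/300 ms. A fresh lease gets no fudge and a nearly expired one gets close to the full 2 seconds, which keeps the earliest end date (the de facto version number) strictly increasing across republishes. Sketch, not project code:

    public class FudgeExample {
        static final long MAX_FUDGE = 2 * 1000;

        // same expression as in RequestLeaseSetJob.runJob() above
        static long fudge(long endTime, long now) {
            return MAX_FUDGE - ((endTime - now) / (10 * 60 * 1000 / MAX_FUDGE));
        }

        public static void main(String[] args) {
            long now = 0;
            System.out.println(fudge(now + 10 * 60 * 1000, now)); // fresh lease: 0 ms added
            System.out.println(fudge(now + 5 * 60 * 1000, now));  // halfway: 1000 ms added
            System.out.println(fudge(now + 60 * 1000, now));      // nearly expired: 1800 ms added
        }
    }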
@@ -304,7 +304,11 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
        // it (due to failure for example) we won't continue to use it.
        for (int i = 0; i < _leaseSet.getLeaseCount(); i++) {
            Lease lease = _leaseSet.getLease(i);
            if (_lease.equals(lease)) {
            // Don't use Lease.equals(), as that compares expiration time too,
            // and that time may change in subsequent publication
            //if (_lease.equals(lease)) {
            if (_lease.getTunnelId().equals(lease.getTunnelId()) &&
                _lease.getGateway().equals(lease.getGateway())) {
                if (_log.shouldLog(Log.INFO))
                    _log.info(getJobId() + ": Found in cache - lease for " + _toString);
                return true;
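The cache-hit test above deliberately ignores the lease end date, which changes on every republish and would otherwise invalidate the cache for no reason. A hypothetical helper showing just the comparison it now uses instead of Lease.equals():

    import net.i2p.data.Lease;

    class LeaseCacheCheck {
        /** same tunnel endpoint: gateway and tunnel ID match, expiration ignored */
        static boolean sameEndpoint(Lease a, Lease b) {
            return a.getTunnelId().equals(b.getTunnelId()) &&
                   a.getGateway().equals(b.getGateway());
        }
    }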
@@ -96,10 +96,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
    /** don't probe or broadcast data, just respond and search when explicitly needed */
    private static final boolean QUIET = false;

    public static final String PROP_ENFORCE_NETID = "router.networkDatabase.enforceNetId";
    private static final boolean DEFAULT_ENFORCE_NETID = false;
    private boolean _enforceNetId = DEFAULT_ENFORCE_NETID;

    public final static String PROP_DB_DIR = "router.networkDatabase.dbDir";
    public final static String DEFAULT_DB_DIR = "netDb";

@@ -143,7 +139,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
        _peerSelector = createPeerSelector();
        _publishingLeaseSets = new HashMap(8);
        _activeRequests = new HashMap(8);
        _enforceNetId = DEFAULT_ENFORCE_NETID;
        _reseedChecker = new ReseedChecker(context);
        context.statManager().createRateStat("netDb.lookupDeferred", "how many lookups are deferred?", "NetworkDatabase", new long[] { 60*60*1000 });
        context.statManager().createRateStat("netDb.exploreKeySet", "how many keys are queued for exploration?", "NetworkDatabase", new long[] { 60*60*1000 });
@@ -223,11 +218,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
            _log.info("No DB dir specified [" + PROP_DB_DIR + "], using [" + DEFAULT_DB_DIR + "]");
            _dbDir = DEFAULT_DB_DIR;
        }
        String enforce = _context.getProperty(PROP_ENFORCE_NETID);
        if (enforce != null)
            _enforceNetId = Boolean.parseBoolean(enforce);
        else
            _enforceNetId = DEFAULT_ENFORCE_NETID;
        _ds.restart();
        _exploreKeys.clear();

@@ -249,12 +239,6 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
            _log.info("Starting up the kademlia network database");
        RouterInfo ri = _context.router().getRouterInfo();
        String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR);
        String enforce = _context.getProperty(PROP_ENFORCE_NETID);
        if (enforce != null)
            _enforceNetId = Boolean.parseBoolean(enforce);
        else
            _enforceNetId = DEFAULT_ENFORCE_NETID;

        _kb = new KBucketSet(_context, ri.getIdentity().getHash());
        try {
            _ds = new PersistentDataStore(_context, dbDir, this);
@@ -443,7 +427,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
            fail(key);
        } else if (rv.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
            try {
                if (validate(key, (RouterInfo)rv) == null)
                if (validate((RouterInfo)rv) == null)
                    return rv;
            } catch (IllegalArgumentException iae) {}
            fail(key);
@@ -512,7 +496,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
        // startup allows some lax rules).
        boolean valid = true;
        try {
            valid = (null == validate(key, (RouterInfo)ds));
            valid = (null == validate((RouterInfo)ds));
        } catch (IllegalArgumentException iae) {
            valid = false;
        }
@@ -531,6 +515,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
    }

    private static final long PUBLISH_DELAY = 3*1000;

    public void publish(LeaseSet localLeaseSet) {
        if (!_initialized) return;
        Hash h = localLeaseSet.getDestination().calculateHash();
@@ -564,6 +549,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
            // remove first since queue is a TreeSet now...
            _context.jobQueue().removeJob(j);
            j.getTiming().setStartAfter(nextTime);
            if (_log.shouldLog(Log.INFO))
                _log.info("Queueing to publish at " + (new Date(nextTime)) + ' ' + localLeaseSet);
            _context.jobQueue().addJob(j);
        }

@@ -627,34 +614,46 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     * Determine whether this leaseSet will be accepted as valid and current
     * given what we know now.
     *
     * TODO this is called several times, only check the key and signature once
     * Unlike for RouterInfos, this is only called once, when stored.
     * After that, LeaseSet.isCurrent() is used.
     *
     * @return reason why the entry is not valid, or null if it is valid
     */
    String validate(Hash key, LeaseSet leaseSet) {
    private String validate(Hash key, LeaseSet leaseSet) {
        if (!key.equals(leaseSet.getDestination().calculateHash())) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Invalid store attempt! key does not match leaseSet.destination! key = "
                           + key + ", leaseSet = " + leaseSet);
            return "Key does not match leaseSet.destination - " + key.toBase64();
        } else if (!leaseSet.verifySignature()) {
        }
        if (!leaseSet.verifySignature()) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Invalid leaseSet signature! leaseSet = " + leaseSet);
            return "Invalid leaseSet signature on " + leaseSet.getDestination().calculateHash().toBase64();
        } else if (leaseSet.getEarliestLeaseDate() <= _context.clock().now() - 2*Router.CLOCK_FUDGE_FACTOR) {
            long age = _context.clock().now() - leaseSet.getEarliestLeaseDate();
        }
        long earliest = leaseSet.getEarliestLeaseDate();
        long latest = leaseSet.getLatestLeaseDate();
        long now = _context.clock().now();
        if (earliest <= now - 2*Router.CLOCK_FUDGE_FACTOR ||
            // same as the isCurrent(Router.CLOCK_FUDGE_FACTOR) test in
            // lookupLeaseSetLocally()
            latest <= now - Router.CLOCK_FUDGE_FACTOR) {
            long age = now - earliest;
            if (_log.shouldLog(Log.WARN))
                _log.warn("Old leaseSet! not storing it: "
                          + leaseSet.getDestination().calculateHash().toBase64()
                          + " expires on " + new Date(leaseSet.getEarliestLeaseDate()), new Exception("Rejecting store"));
                          + leaseSet.getDestination().calculateHash()
                          + " first exp. " + new Date(earliest)
                          + " last exp. " + new Date(latest),
                          new Exception("Rejecting store"));
            return "Expired leaseSet for " + leaseSet.getDestination().calculateHash().toBase64()
                   + " expired " + DataHelper.formatDuration(age) + " ago";
        } else if (leaseSet.getEarliestLeaseDate() > _context.clock().now() + (Router.CLOCK_FUDGE_FACTOR + MAX_LEASE_FUTURE)) {
            long age = leaseSet.getEarliestLeaseDate() - _context.clock().now();
        }
        if (latest > now + (Router.CLOCK_FUDGE_FACTOR + MAX_LEASE_FUTURE)) {
            long age = latest - now;
            // let's not make this an error, it happens when peers have bad clocks
            if (_log.shouldLog(Log.WARN))
                _log.warn("LeaseSet expires too far in the future: "
                          + leaseSet.getDestination().calculateHash().toBase64()
                          + leaseSet.getDestination().calculateHash()
                          + " expires " + DataHelper.formatDuration(age) + " from now");
            return "Future expiring leaseSet for " + leaseSet.getDestination().calculateHash()
                   + " expiring in " + DataHelper.formatDuration(age);
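Condensed restatement of the acceptance window above, same comparisons and constants, returning a reason string or null in the style of validate(); sketch only:

    class LeaseSetWindowSketch {
        /** @return null if the expirations are acceptable, otherwise the reason */
        static String checkExpirations(long earliest, long latest, long now,
                                       long clockFudge, long maxLeaseFuture) {
            if (earliest <= now - 2 * clockFudge || latest <= now - clockFudge)
                return "expired";                   // even the newest lease is already over
            if (latest > now + clockFudge + maxLeaseFuture)
                return "too far in the future";     // usually a peer with a bad clock
            return null;
        }
    }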
@@ -720,11 +719,40 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     * Determine whether this routerInfo will be accepted as valid and current
     * given what we know now.
     *
     * TODO this is called several times, only check the key and signature once
     * Call this only on first store, to check the key and signature once
     *
     * @return reason why the entry is not valid, or null if it is valid
     */
    String validate(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
    private String validate(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
        if (!key.equals(routerInfo.getIdentity().getHash())) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Invalid store attempt! key does not match routerInfo.identity! key = " + key + ", router = " + routerInfo);
            return "Key does not match routerInfo.identity";
        }
        if (!routerInfo.isValid()) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Invalid routerInfo signature! forged router structure! router = " + routerInfo);
            return "Invalid routerInfo signature";
        }
        if (routerInfo.getNetworkId() != Router.NETWORK_ID){
            _context.banlist().banlistRouter(key, "Not in our network");
            if (_log.shouldLog(Log.WARN))
                _log.warn("Bad network: " + routerInfo);
            return "Not in our network";
        }
        return validate(routerInfo);
    }

    /**
     * Determine whether this routerInfo will be accepted as valid and current
     * given what we know now.
     *
     * Call this before each use, to check expiration
     *
     * @return reason why the entry is not valid, or null if it is valid
     * @since 0.9.7
     */
    private String validate(RouterInfo routerInfo) throws IllegalArgumentException {
        long now = _context.clock().now();
        boolean upLongEnough = _context.router().getUptime() > 60*60*1000;
        // Once we're over MIN_ROUTERS routers, reduce the expiration time down from the default,
@@ -743,59 +771,44 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
                ROUTER_INFO_EXPIRATION_MIN +
                ((ROUTER_INFO_EXPIRATION - ROUTER_INFO_EXPIRATION_MIN) * MIN_ROUTERS / (_kb.size() + 1)));

        if (!key.equals(routerInfo.getIdentity().getHash())) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Invalid store attempt! key does not match routerInfo.identity! key = " + key + ", router = " + routerInfo);
            return "Key does not match routerInfo.identity - " + key.toBase64();
        } else if (!routerInfo.isValid()) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Invalid routerInfo signature! forged router structure! router = " + routerInfo);
            return "Invalid routerInfo signature on " + key.toBase64();
        } else if (upLongEnough && !routerInfo.isCurrent(adjustedExpiration)) {
        if (routerInfo.getNetworkId() != Router.NETWORK_ID) {
            _context.banlist().banlistRouter(key, "Peer is not in our network");
            return "Peer is not in our network (" + routerInfo.getNetworkId() + ", wants "
                   + Router.NETWORK_ID + "): " + routerInfo.calculateHash().toBase64();
        }
        if (upLongEnough && !routerInfo.isCurrent(adjustedExpiration)) {
            long age = _context.clock().now() - routerInfo.getPublished();
            int existing = _kb.size();
            if (existing >= MIN_REMAINING_ROUTERS) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Not storing expired router for " + key.toBase64(), new Exception("Rejecting store"));
                return "Peer " + key.toBase64() + " expired " + DataHelper.formatDuration(age) + " ago";
                    _log.info("Not storing expired RI " + routerInfo.getIdentity().getHash(), new Exception("Rejecting store"));
                return "Peer expired " + DataHelper.formatDuration(age) + " ago";
            } else {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Even though the peer is old, we have only " + existing
                              + " peers left (curPeer: " + key.toBase64() + " published on "
                              + new Date(routerInfo.getPublished()));
                              + " peers left " + routerInfo);
            }
        } else if (routerInfo.getPublished() > now + 2*Router.CLOCK_FUDGE_FACTOR) {
        }
        if (routerInfo.getPublished() > now + 2*Router.CLOCK_FUDGE_FACTOR) {
            long age = routerInfo.getPublished() - _context.clock().now();
            if (_log.shouldLog(Log.INFO))
                _log.info("Peer " + key.toBase64() + " published their routerInfo in the future?! ["
                _log.info("Peer " + routerInfo.getIdentity().getHash() + " published their routerInfo in the future?! ["
                          + new Date(routerInfo.getPublished()) + "]", new Exception("Rejecting store"));
            return "Peer " + key.toBase64() + " published " + DataHelper.formatDuration(age) + " in the future?!";
        } else if (_enforceNetId && (routerInfo.getNetworkId() != Router.NETWORK_ID) ){
            String rv = "Peer " + key.toBase64() + " is from another network, not accepting it (id="
                        + routerInfo.getNetworkId() + ", want " + Router.NETWORK_ID + ")";
            return rv;
        } else if (upLongEnough && (routerInfo.getPublished() < now - 2*24*60*60*1000l) ) {
            return "Peer published " + DataHelper.formatDuration(age) + " in the future?!";
        }
        if (upLongEnough && (routerInfo.getPublished() < now - 2*24*60*60*1000l) ) {
            long age = _context.clock().now() - routerInfo.getPublished();
            return "Peer " + key.toBase64() + " published " + DataHelper.formatDuration(age) + " ago";
        } else if (upLongEnough && !routerInfo.isCurrent(ROUTER_INFO_EXPIRATION_SHORT)) {
            return "Peer published " + DataHelper.formatDuration(age) + " ago";
        }
        if (upLongEnough && !routerInfo.isCurrent(ROUTER_INFO_EXPIRATION_SHORT)) {
            if (routerInfo.getAddresses().isEmpty())
                return "Peer " + key.toBase64() + " published > 75m ago with no addresses";
                return "Peer published > 75m ago with no addresses";
            // This should cover the introducers case below too
            // And even better, catches the case where the router is unreachable but knows no introducers
            if (routerInfo.getCapabilities().indexOf(Router.CAPABILITY_UNREACHABLE) >= 0)
                return "Peer " + key.toBase64() + " published > 75m ago and thinks it is unreachable";
                return "Peer published > 75m ago and thinks it is unreachable";
            RouterAddress ra = routerInfo.getTargetAddress("SSU");
            if (ra != null) {
                // Introducers change often, introducee will ping introducer for 2 hours
                if (ra.getOption("ihost0") != null)
                    return "Peer " + key.toBase64() + " published > 75m ago with SSU Introducers";
                    return "Peer published > 75m ago with SSU Introducers";
                if (routerInfo.getTargetAddress("NTCP") == null)
                    return "Peer " + key.toBase64() + " published > 75m ago, SSU only without introducers";
                    return "Peer published > 75m ago, SSU only without introducers";
            }
        }
        return null;
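The net effect of the split: the expensive identity, signature, and network checks run once at store time, and only the cheap freshness checks run on every lookup. A rough sketch of the resulting call pattern (method names follow the diff; the wrapper class itself is hypothetical):

    import net.i2p.data.Hash;
    import net.i2p.data.RouterInfo;
    import net.i2p.router.Router;

    class RouterInfoValidationSketch {
        /** store time: identity, signature, and network checks, then freshness */
        String validateOnStore(Hash key, RouterInfo ri) {
            if (!key.equals(ri.getIdentity().getHash()))
                return "Key does not match routerInfo.identity";
            if (!ri.isValid())
                return "Invalid routerInfo signature";
            if (ri.getNetworkId() != Router.NETWORK_ID)
                return "Not in our network";
            return validateOnUse(ri);
        }

        /** every use: expiration / usefulness checks only, no signature verification */
        String validateOnUse(RouterInfo ri) {
            // age, published-in-the-future, and no-address checks go here
            return null;
        }
    }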
@@ -47,12 +47,12 @@ public class RepublishLeaseSetJob extends JobImpl {
        if (getContext().clientManager().isLocal(_dest)) {
            LeaseSet ls = _facade.lookupLeaseSetLocally(_dest);
            if (ls != null) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Client " + _dest + " is local, so we're republishing it");
                if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
                    if (_log.shouldLog(Log.WARN))
                        _log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
                } else {
                    if (_log.shouldLog(Log.INFO))
                        _log.info("Publishing " + ls);
                    getContext().statManager().addRateData("netDb.republishLeaseSetCount", 1, 0);
                    _facade.sendStore(_dest, ls, new OnRepublishSuccess(getContext()), new OnRepublishFailure(getContext(), this), REPUBLISH_LEASESET_TIMEOUT, null);
                    _lastPublished = getContext().clock().now();
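Per the comment in validate() above ("same as the isCurrent(Router.CLOCK_FUDGE_FACTOR) test"), the guard here presumably boils down to comparing the latest lease end date against now minus a clock-skew allowance. An assumed equivalent, not the actual LeaseSet.isCurrent() source:

    // assumed equivalent of ls.isCurrent(fudge)
    static boolean isCurrent(long latestLeaseDate, long now, long fudge) {
        return latestLeaseDate > now - fudge;
    }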
@@ -81,9 +81,11 @@ class TransientDataStore implements DataStore {
        return Collections.unmodifiableSet(_data.entrySet());
    }

    /** for PersistentDataStore only - don't use here @throws IAE always */
    /** for PersistentDataStore only - don't use here
     * @throws UnsupportedOperationException always
     */
    public DatabaseEntry get(Hash key, boolean persist) {
        throw new IllegalArgumentException("no");
        throw new UnsupportedOperationException();
    }

    public DatabaseEntry get(Hash key) {
@@ -103,9 +105,11 @@ class TransientDataStore implements DataStore {
        return count;
    }

    /** for PersistentDataStore only - don't use here @throws IAE always */
    /** for PersistentDataStore only - don't use here
     * @throws UnsupportedOperationException always
     */
    public boolean put(Hash key, DatabaseEntry data, boolean persist) {
        throw new IllegalArgumentException("no");
        throw new UnsupportedOperationException();
    }

    /**
@@ -116,8 +120,7 @@ class TransientDataStore implements DataStore {
        if (data == null) return false;
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Storing key " + key);
        DatabaseEntry old = null;
        old = _data.putIfAbsent(key, data);
        DatabaseEntry old = _data.putIfAbsent(key, data);
        boolean rv = false;
        if (data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
            // Don't do this here so we don't reset it at router startup;
@@ -128,13 +131,15 @@ class TransientDataStore implements DataStore {
            RouterInfo ori = (RouterInfo)old;
            if (ri.getPublished() < ori.getPublished()) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Almost clobbered an old router! " + key + ": [old published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]");
                    _log.info("Almost clobbered an old router! " + key + ": [old published on " + new Date(ori.getPublished()) +
                              " new on " + new Date(ri.getPublished()) + ']');
            } else if (ri.getPublished() == ori.getPublished()) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Duplicate " + key);
            } else {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Updated the old router for " + key + ": [old published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]");
                    _log.info("Updated the old router for " + key + ": [old published on " + new Date(ori.getPublished()) +
                              " new on " + new Date(ri.getPublished()) + ']');
                _data.put(key, data);
                rv = true;
            }
@@ -149,13 +154,15 @@ class TransientDataStore implements DataStore {
            LeaseSet ols = (LeaseSet)old;
            if (ls.getEarliestLeaseDate() < ols.getEarliestLeaseDate()) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Almost clobbered an old leaseSet! " + key + ": [old published on " + new Date(ols.getEarliestLeaseDate()) + " new on " + new Date(ls.getEarliestLeaseDate()) + "]");
                    _log.info("Almost clobbered an old leaseSet! " + key + ": [old expires " + new Date(ols.getEarliestLeaseDate()) +
                              " new on " + new Date(ls.getEarliestLeaseDate()) + ']');
            } else if (ls.getEarliestLeaseDate() == ols.getEarliestLeaseDate()) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Duplicate " + key);
            } else {
                if (_log.shouldLog(Log.INFO)) {
                    _log.info("Updated old leaseSet " + key + ": [old published on " + new Date(ols.getEarliestLeaseDate()) + " new on " + new Date(ls.getEarliestLeaseDate()) + "]");
                    _log.info("Updated old leaseSet " + key + ": [old expires " + new Date(ols.getEarliestLeaseDate()) +
                              " new on " + new Date(ls.getEarliestLeaseDate()) + ']');
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
                }
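For both entry types the store keeps whichever copy looks newer: RouterInfos compare the published date, leasesets compare the earliest lease end date, which again acts as the version number. That is exactly why the bug fixed in this commit (a republished leaseset coming out with an earlier earliest date) caused newer leasesets to be dropped as "older". A condensed sketch of the decision, using accessors shown above:

    import net.i2p.data.DatabaseEntry;
    import net.i2p.data.LeaseSet;
    import net.i2p.data.RouterInfo;

    class NewerEntryCheck {
        /** is the incoming entry strictly newer than the one already stored? (sketch only) */
        static boolean isNewer(DatabaseEntry incoming, DatabaseEntry existing) {
            if (incoming.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
                return ((RouterInfo) incoming).getPublished() > ((RouterInfo) existing).getPublished();
            return ((LeaseSet) incoming).getEarliestLeaseDate() > ((LeaseSet) existing).getEarliestLeaseDate();
        }
    }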
@@ -164,7 +171,7 @@ class TransientDataStore implements DataStore {
            }
        } else {
            if (_log.shouldLog(Log.INFO)) {
                _log.info("New leaseset for " + key + ": published on " + new Date(ls.getEarliestLeaseDate()));
                _log.info("New leaseset for " + key + ": expires " + new Date(ls.getEarliestLeaseDate()));
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply());
            }
@@ -187,9 +194,11 @@ class TransientDataStore implements DataStore {
        return buf.toString();
    }

    /** for PersistentDataStore only - don't use here */
    /** for PersistentDataStore only - don't use here
     * @throws UnsupportedOperationException always
     */
    public DatabaseEntry remove(Hash key, boolean persist) {
        throw new IllegalArgumentException("no");
        throw new UnsupportedOperationException();
    }

    public DatabaseEntry remove(Hash key) {
@@ -16,6 +16,9 @@ class ExpireJob extends JobImpl {
    private boolean _leaseUpdated;
    private final long _dropAfter;

    private static final long OB_EARLY_EXPIRE = 30*1000;
    private static final long IB_EARLY_EXPIRE = OB_EARLY_EXPIRE + 7500;

    public ExpireJob(RouterContext ctx, TunnelCreatorConfig cfg, TunnelPool pool) {
        super(ctx);
        _pool = pool;
@@ -27,9 +30,11 @@ class ExpireJob extends JobImpl {
        // Also skew the inbound away from the outbound
        long expire = cfg.getExpiration();
        _dropAfter = expire + Router.CLOCK_FUDGE_FACTOR;
        expire -= ctx.random().nextLong(60*1000);
        if (_pool.getSettings().isInbound())
            expire -= ctx.random().nextLong(15*1000);
            expire -= IB_EARLY_EXPIRE + ctx.random().nextLong(IB_EARLY_EXPIRE);
        else
            expire -= OB_EARLY_EXPIRE + ctx.random().nextLong(OB_EARLY_EXPIRE);
        // See comments in TunnelPool.locked_buildNewLeaseSet
        cfg.setExpiration(expire);
        getTiming().setStartAfter(expire);
    }
@@ -42,6 +47,7 @@ class ExpireJob extends JobImpl {
        if (!_leaseUpdated) {
            _pool.removeTunnel(_cfg);
            _leaseUpdated = true;
            // noop for outbound
            _pool.refreshLeaseSet();
            long timeToDrop = _dropAfter - getContext().clock().now();
            requeue(timeToDrop);
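The constructor now drops leases out of the leaseset by a fixed margin plus a random spread rather than a purely random 0-60 s (0-75 s inbound), and keeps inbound ahead of outbound: with the constants above, outbound tunnels leave the leaseset 30-60 s before they actually expire and inbound tunnels 37.5-75 s before. A small sketch of the resulting ranges (java.util.Random stands in for the router's random source):

    import java.util.Random;

    class EarlyExpireSketch {
        static final long OB_EARLY_EXPIRE = 30 * 1000;
        static final long IB_EARLY_EXPIRE = OB_EARLY_EXPIRE + 7500;

        /** returns the adjusted expiration used for the lease / ExpireJob timing */
        static long adjustedExpiration(long expire, boolean inbound, Random rnd) {
            long base = inbound ? IB_EARLY_EXPIRE : OB_EARLY_EXPIRE;
            return expire - (base + (long) (rnd.nextDouble() * base));
        }
    }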
@@ -18,6 +18,7 @@ import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.TunnelPoolSettings;
import net.i2p.router.tunnel.HopConfig;
import net.i2p.router.tunnel.TunnelCreatorConfig;
import net.i2p.stat.Rate;
import net.i2p.stat.RateAverages;
import net.i2p.stat.RateStat;
@@ -725,7 +726,13 @@ public class TunnelPool {
                continue;
            }
            Lease lease = new Lease();
            lease.setEndDate(new Date(tunnel.getExpiration()));
            // bugfix
            // ExpireJob reduces the expiration, which causes a 2nd leaseset with the same lease
            // to have an earlier expiration, so it isn't stored.
            // Get the "real" expiration from the gateway hop config,
            // HopConfig expirations are the same as the "real" expiration and don't change
            // see configureNewTunnel()
            lease.setEndDate(new Date(((TunnelCreatorConfig)tunnel).getConfig(0).getExpiration()));
            lease.setTunnelId(inId);
            lease.setGateway(gw);
            leases.add(lease);
@@ -1131,6 +1138,7 @@ public class TunnelPool {
            // tunnelIds will be updated during building, and as the creator, we
            // don't need to worry about prev/next hop
        }
        // note that this will be adjusted by expire job
        cfg.setExpiration(expiration);
        if (!settings.isInbound())
            cfg.setPriority(settings.getPriority());