diff --git a/core/java/src/net/i2p/data/LeaseSet2.java b/core/java/src/net/i2p/data/LeaseSet2.java index ced13d8d6f..208c76496b 100644 --- a/core/java/src/net/i2p/data/LeaseSet2.java +++ b/core/java/src/net/i2p/data/LeaseSet2.java @@ -52,6 +52,17 @@ public class LeaseSet2 extends LeaseSet { _checked = true; } + /** + * Published timestamp, as received. + * Different than getDate(), which is the earliest lease expiration. + * + * @return in ms, with 1 second resolution + * @since 0.9.39 + */ + public long getPublished() { + return _published; + } + public boolean isUnpublished() { return (_flags & FLAG_UNPUBLISHED) != 0; } @@ -384,8 +395,10 @@ public class LeaseSet2 extends LeaseSet { protected void writeHeader(OutputStream out) throws DataFormatException, IOException { _destination.writeBytes(out); - if (_published <= 0) - _published = Clock.getInstance().now(); + if (_published <= 0) { + // we round it here, so comparisons during verifies aren't wrong + _published = ((Clock.getInstance().now() + 500) / 1000) * 1000; + } long pub1k = _published / 1000; DataHelper.writeLong(out, 4, pub1k); // Divide separately to prevent rounding errors @@ -575,8 +588,9 @@ public class LeaseSet2 extends LeaseSet { buf.append("\n\tPublished: ").append(new java.util.Date(_published)); buf.append("\n\tExpires: ").append(new java.util.Date(_expires)); buf.append("\n\tLeases: #").append(getLeaseCount()); - for (int i = 0; i < getLeaseCount(); i++) + for (int i = 0; i < getLeaseCount(); i++) { buf.append("\n\t\t").append(getLease(i)); + } buf.append("]"); return buf.toString(); } diff --git a/history.txt b/history.txt index 0a086c1618..b48bc095c0 100644 --- a/history.txt +++ b/history.txt @@ -1,4 +1,26 @@ +2019-02-23 zzz + * NetDB: + - Use published date, not earliest lease expiration, for LS2 comparisons + - Fix earliest LS expiration adjustment when publishing for LS2 + - Increase flood candidates for LS2 + - Don't start new store after verify fail if we've already done so + - Version 
checks for encrypted LS2 + +2019-02-21 zzz + * Crypto: Keygen for RedDSA, allow RedDSA for unblinded keys (Enc LS2) + * Data: Always set unpublished flag for inner LS (Enc LS2) + * I2CP: Force i2cp.leaseSetType option for offline keys + +2019-02-20 zzz + * Crypto: ChaCha20 and RedDSA for Encrypted LS2 (proposal #123) + * Data: Encrypt/decrypt/sign/verify for Encrypted LS2 (proposal #123) + +2019-02-19 zzz + * Crypto: Implement blinding, add sig type 11 (proposal 123) + 2019-02-18 zzz + * Console: Drop midnight and classic themes (ticket #2272) + * Tomcat 8.5.38 * Transport: - Fixes for NTCP when SSU disabled (ticket #1417) - Delay port forwarding until after UPnP rescan complete diff --git a/router/java/src/net/i2p/router/RouterVersion.java b/router/java/src/net/i2p/router/RouterVersion.java index a6204817e3..f2522cd4ed 100644 --- a/router/java/src/net/i2p/router/RouterVersion.java +++ b/router/java/src/net/i2p/router/RouterVersion.java @@ -18,7 +18,7 @@ public class RouterVersion { /** deprecated */ public final static String ID = "Monotone"; public final static String VERSION = CoreVersion.VERSION; - public final static long BUILD = 8; + public final static long BUILD = 9; /** for example "-test" */ public final static String EXTRA = ""; diff --git a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java index c316fb8a40..a0c9f49fe5 100644 --- a/router/java/src/net/i2p/router/client/ClientConnectionRunner.java +++ b/router/java/src/net/i2p/router/client/ClientConnectionRunner.java @@ -607,7 +607,7 @@ class ClientConnectionRunner { } else { state.setIsSuccessful(true); if (_log.shouldLog(Log.DEBUG)) - _log.debug("LeaseSet created fully: " + state + " / " + ls); + _log.debug("LeaseSet created fully: " + state + '\n' + ls); sp.leaseRequest = null; _consecutiveLeaseRequestFails = 0; } @@ -813,7 +813,7 @@ class ClientConnectionRunner { // so the comparison will always work. 
int leases = set.getLeaseCount(); // synch so _currentLeaseSet isn't changed out from under us - LeaseSet current = null; + LeaseSet current; Destination dest = sp.dest; LeaseRequestState state; synchronized (this) { @@ -875,7 +875,8 @@ class ClientConnectionRunner { } else { // so the timer won't fire off with an older LS request sp.rerequestTimer = null; - sp.leaseRequest = state = new LeaseRequestState(onCreateJob, onFailedJob, + long earliest = (current != null) ? current.getEarliestLeaseDate() : 0; + sp.leaseRequest = state = new LeaseRequestState(onCreateJob, onFailedJob, earliest, _context.clock().now() + expirationTime, set); if (_log.shouldLog(Log.DEBUG)) _log.debug("New request: " + state); diff --git a/router/java/src/net/i2p/router/client/LeaseRequestState.java b/router/java/src/net/i2p/router/client/LeaseRequestState.java index a6c597edc8..0cbcf7cbf2 100644 --- a/router/java/src/net/i2p/router/client/LeaseRequestState.java +++ b/router/java/src/net/i2p/router/client/LeaseRequestState.java @@ -25,17 +25,22 @@ class LeaseRequestState { private final Job _onGranted; private final Job _onFailed; private final long _expiration; + private final long _currentEarliestLeastDate; private boolean _successful; /** + * @param currentEarliestLeastDate absolute time, the earliest expiration in + * the current LS (NOT the requested one), or 0 if none * @param expiration absolute time, when the request expires (not when the LS expires) * @param requested LeaseSet with requested leases - this object must be updated to contain the * signed version (as well as any changed/added/removed Leases) * The LeaseSet contains Leases and destination only, it is unsigned. 
*/ - public LeaseRequestState(Job onGranted, Job onFailed, long expiration, LeaseSet requested) { + public LeaseRequestState(Job onGranted, Job onFailed, long currentEarliestLeastDate, + long expiration, LeaseSet requested) { _onGranted = onGranted; _onFailed = onFailed; + _currentEarliestLeastDate = currentEarliestLeastDate; _expiration = expiration; _requestedLeaseSet = requested; } @@ -69,6 +74,14 @@ class LeaseRequestState { /** when the request for the lease set expires */ public long getExpiration() { return _expiration; } + /** + * The earliest lease expiration time in the current LS (NOT the requested one), + * or 0 if none. + * + * @since 0.9.39 + */ + public long getCurrentEarliestLeaseDate() { return _currentEarliestLeastDate; } + /** whether the request was successful in the time allotted */ public boolean getIsSuccessful() { return _successful; } public void setIsSuccessful(boolean is) { _successful = is; } diff --git a/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java b/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java index dbf225539a..2667ab1174 100644 --- a/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java +++ b/router/java/src/net/i2p/router/client/RequestLeaseSetJob.java @@ -9,6 +9,7 @@ package net.i2p.router.client; */ import java.util.Date; +import java.util.Properties; import net.i2p.data.Lease; import net.i2p.data.LeaseSet; @@ -16,6 +17,7 @@ import net.i2p.data.i2cp.I2CPMessage; import net.i2p.data.i2cp.I2CPMessageException; import net.i2p.data.i2cp.RequestLeaseSetMessage; import net.i2p.data.i2cp.RequestVariableLeaseSetMessage; +import net.i2p.data.i2cp.SessionConfig; import net.i2p.data.i2cp.SessionId; import net.i2p.router.JobImpl; import net.i2p.router.RouterContext; @@ -53,16 +55,38 @@ class RequestLeaseSetJob extends JobImpl { public void runJob() { if (_runner.isDead()) return; + boolean isLS2 = false; + SessionConfig cfg = _runner.getPrimaryConfig(); + if (cfg != null) { + Properties props = 
cfg.getOptions(); + if (props != null) { + String lsType = props.getProperty("i2cp.leaseSetType"); + if (lsType != null && !lsType.equals("1")) + isLS2 = true; + } + } + LeaseSet requested = _requestState.getRequested(); long endTime = requested.getEarliestLeaseDate(); // Add a small number of ms (0 to MAX_FUDGE) that increases as we approach the expire time. // Since the earliest date functions as a version number, // this will force the floodfill to flood each new version; // otherwise it won't if the earliest time hasn't changed. - long fudge = MAX_FUDGE - ((endTime - getContext().clock().now()) / (10*60*1000 / MAX_FUDGE)); + + if (isLS2) { + // fix for 0.9.38 floodfills, + // adding some ms doesn't work since the dates are truncated, + // and 0.9.38 did not use LeaseSet2.getPublished() + long earliest = 1000 + _requestState.getCurrentEarliestLeaseDate(); + if (endTime < earliest) + endTime = earliest; + } else { + long diff = endTime - getContext().clock().now(); + long fudge = MAX_FUDGE - (diff / (10*60*1000 / MAX_FUDGE)); + endTime += fudge; + } //if (_log.shouldLog(Log.DEBUG)) // _log.debug("Adding fudge " + fudge); - endTime += fudge; SessionId id = _runner.getSessionId(requested.getDestination().calculateHash()); if (id == null) { diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java index 568d7cf4f1..0e402359b4 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillNetworkDatabaseFacade.java @@ -208,11 +208,20 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad RouterKeyGenerator gen = _context.routerKeyGenerator(); Hash rkey = gen.getRoutingKey(key); FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector(); - List peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, 
getKBuckets()); + final int type = ds.getType(); + final boolean isls2 = ds.isLeaseSet() && type != DatabaseEntry.KEY_TYPE_LEASESET; + int max = MAX_TO_FLOOD; + // increase candidates because we will be skipping some + if (type == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) + max *= 4; + else if (isls2) + max *= 2; + List peers = sel.selectFloodfillParticipants(rkey, max, getKBuckets()); + // todo key cert skip? long until = gen.getTimeTillMidnight(); if (until < NEXT_RKEY_LS_ADVANCE_TIME || - (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO && until < NEXT_RKEY_RI_ADVANCE_TIME)) { + (type == DatabaseEntry.KEY_TYPE_ROUTERINFO && until < NEXT_RKEY_RI_ADVANCE_TIME)) { // to avoid lookup faulures after midnight, also flood to some closest to the // next routing key for a period of time before midnight. Hash nkey = gen.getNextRoutingKey(key); @@ -229,28 +238,20 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad peers.add(h); i++; } + if (i >= MAX_TO_FLOOD) + break; + } + if (i > 0) { + max += i; + if (_log.shouldInfo()) + _log.info("Flooding the entry for " + key + " to " + i + " more, just before midnight"); } - if (i > 0 && _log.shouldLog(Log.INFO)) - _log.info("Flooding the entry for " + key + " to " + i + " more, just before midnight"); } int flooded = 0; - boolean isls2 = ds.isLeaseSet() && ds.getType() != DatabaseEntry.KEY_TYPE_LEASESET; for (int i = 0; i < peers.size(); i++) { Hash peer = peers.get(i); RouterInfo target = lookupRouterInfoLocally(peer); - if ( (target == null) || (_context.banlist().isBanlisted(peer)) ) - continue; - // Don't flood an RI back to itself - // Not necessary, a ff will do its own flooding (reply token == 0) - // But other implementations may not... 
- if (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO && peer.equals(key)) - continue; - if (peer.equals(_context.routerHash())) - continue; - // min version checks - if (isls2 && !StoreJob.shouldStoreLS2To(target)) - continue; - if (!StoreJob.shouldStoreTo(target)) + if (!shouldFloodTo(key, type, peer, target)) continue; DatabaseStoreMessage msg = new DatabaseStoreMessage(_context); msg.setEntry(ds); @@ -265,12 +266,37 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad flooded++; if (_log.shouldLog(Log.INFO)) _log.info("Flooding the entry for " + key.toBase64() + " to " + peer.toBase64()); + if (flooded >= MAX_TO_FLOOD) + break; } if (_log.shouldLog(Log.INFO)) _log.info("Flooded the data to " + flooded + " of " + peers.size() + " peers"); } + /** @since 0.9.39 */ + private boolean shouldFloodTo(Hash key, int type, Hash peer, RouterInfo target) { + if ( (target == null) || (_context.banlist().isBanlisted(peer)) ) + return false; + // Don't flood an RI back to itself + // Not necessary, a ff will do its own flooding (reply token == 0) + // But other implementations may not... 
+            if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO && peer.equals(key)) +                return false; +            if (peer.equals(_context.routerHash())) +                return false; +            // min version checks +            if (type != DatabaseEntry.KEY_TYPE_ROUTERINFO && type != DatabaseEntry.KEY_TYPE_LEASESET && +                !StoreJob.shouldStoreLS2To(target)) +                return false; +            if (type == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2 && +                !StoreJob.shouldStoreEncLS2To(target)) +                return false; +            if (!StoreJob.shouldStoreTo(target)) +                return false; +            return true; +    } +     /** note in the profile that the store failed */     private static class FloodFailedJob extends JobImpl {         private final Hash _peer; diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java index 854432a6a0..0f137c9a74 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillStoreJob.java @@ -13,6 +13,7 @@ import java.util.Set;  import net.i2p.data.DatabaseEntry; import net.i2p.data.Hash; +import net.i2p.data.LeaseSet2; import net.i2p.router.Job; import net.i2p.router.RouterContext; import net.i2p.util.Log; @@ -58,37 +59,49 @@ class FloodfillStoreJob extends StoreJob {     protected void succeed() {         super.succeed();  +        final boolean shouldLog = _log.shouldInfo(); +         if (_facade.isVerifyInProgress(_state.getTarget())) { -            if (_log.shouldLog(Log.INFO)) +            if (shouldLog)                 _log.info("Skipping verify, one already in progress for: " + _state.getTarget());             return;         }         if (getContext().router().gracefulShutdownInProgress()) { -            if (_log.shouldLog(Log.INFO)) +            if (shouldLog)                 _log.info("Skipping verify, shutdown in progress for: " + _state.getTarget());             return;         }         // Get the time stamp from the data we sent, so the Verify job can meke sure that         // it finds something stamped with that time or newer. 
DatabaseEntry data = _state.getData(); - boolean isRouterInfo = data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO; + final int type = data.getType(); + final boolean isRouterInfo = type == DatabaseEntry.KEY_TYPE_ROUTERINFO; // default false since 0.9.7.1 if (isRouterInfo && !getContext().getBooleanProperty(PROP_RI_VERIFY)) { _facade.routerInfoPublishSuccessful(); return; } - long published = data.getDate(); - boolean isls2 = data.isLeaseSet() && data.getType() != DatabaseEntry.KEY_TYPE_LEASESET; - + final boolean isls2 = data.isLeaseSet() && type != DatabaseEntry.KEY_TYPE_LEASESET; + long published; + if (isls2) { + LeaseSet2 ls2 = (LeaseSet2) data; + published = ls2.getPublished(); + } else { + published = data.getDate(); + } // we should always have exactly one successful entry Hash sentTo = null; try { sentTo = _state.getSuccessful().iterator().next(); } catch (NoSuchElementException nsee) {} - getContext().jobQueue().addJob(new FloodfillVerifyStoreJob(getContext(), _state.getTarget(), - published, isRouterInfo, isls2, - sentTo, _facade)); + Job fvsj = new FloodfillVerifyStoreJob(getContext(), _state.getTarget(), + published, type, + sentTo, _facade); + if (shouldLog) + _log.info(getJobId() + ": Succeeded sending key " + _state.getTarget() + + ", queueing verify job " + fvsj.getJobId()); + getContext().jobQueue().addJob(fvsj); } @Override diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java index 5e09087cd9..024edf7b93 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/FloodfillVerifyStoreJob.java @@ -9,6 +9,7 @@ import net.i2p.data.DatabaseEntry; import net.i2p.data.Destination; import net.i2p.data.Hash; import net.i2p.data.LeaseSet; +import net.i2p.data.LeaseSet2; import net.i2p.data.router.RouterInfo; import 
net.i2p.data.i2np.DatabaseLookupMessage; import net.i2p.data.i2np.DatabaseSearchReplyMessage; @@ -16,6 +17,7 @@ import net.i2p.data.i2np.DatabaseStoreMessage; import net.i2p.data.i2np.I2NPMessage; import net.i2p.router.JobImpl; import net.i2p.router.MessageSelector; +import net.i2p.router.ProfileManager; import net.i2p.router.ReplyJob; import net.i2p.router.RouterContext; import net.i2p.router.TunnelInfo; @@ -37,6 +39,7 @@ class FloodfillVerifyStoreJob extends JobImpl { private long _expiration; private long _sendTime; private final long _published; + private final int _type; private final boolean _isRouterInfo; private final boolean _isLS2; private MessageWrapper.WrappedMessage _wrappedMessage; @@ -51,16 +54,18 @@ class FloodfillVerifyStoreJob extends JobImpl { /** * Delay a few seconds, then start the verify + * @param published getDate() for RI or LS1, getPublished() for LS2 * @param sentTo who to give the credit or blame to, can be null */ - public FloodfillVerifyStoreJob(RouterContext ctx, Hash key, long published, boolean isRouterInfo, - boolean isLS2, Hash sentTo, FloodfillNetworkDatabaseFacade facade) { + public FloodfillVerifyStoreJob(RouterContext ctx, Hash key, long published, int type, + Hash sentTo, FloodfillNetworkDatabaseFacade facade) { super(ctx); facade.verifyStarted(key); _key = key; _published = published; - _isRouterInfo = isRouterInfo; - _isLS2 = isLS2; + _isRouterInfo = type == DatabaseEntry.KEY_TYPE_ROUTERINFO; + _isLS2 = !_isRouterInfo && type != DatabaseEntry.KEY_TYPE_LEASESET; + _type = type; _log = ctx.logManager().getLog(getClass()); _sentTo = sentTo; _facade = facade; @@ -148,7 +153,7 @@ class FloodfillVerifyStoreJob extends JobImpl { } } if (_log.shouldLog(Log.INFO)) - _log.info("Requesting encrypted reply from " + _target + ' ' + sess.key + ' ' + sess.tag); + _log.info(getJobId() + ": Requesting encrypted reply from " + _target + ' ' + sess.key + ' ' + sess.tag); lookup.setReplySession(sess.key, sess.tag); } Hash fromKey; @@ -166,7 
+171,7 @@ class FloodfillVerifyStoreJob extends JobImpl { I2NPMessage sent = _wrappedMessage.getMessage(); if (_log.shouldLog(Log.INFO)) - _log.info("Starting verify (stored " + _key + " to " + _sentTo + "), asking " + _target); + _log.info(getJobId() + ": Starting verify (stored " + _key + " to " + _sentTo + "), asking " + _target); _sendTime = getContext().clock().now(); _expiration = _sendTime + VERIFY_TIMEOUT; getContext().messageRegistry().registerPending(new VerifyReplySelector(), @@ -199,7 +204,8 @@ class FloodfillVerifyStoreJob extends JobImpl { RouterInfo ri = _facade.lookupRouterInfoLocally(peer); //if (ri != null && StoreJob.supportsCert(ri, keyCert)) { if (ri != null && StoreJob.shouldStoreTo(ri) && - (!_isLS2 || StoreJob.shouldStoreLS2To(ri))) { + (!_isLS2 || (StoreJob.shouldStoreLS2To(ri) && + (_type != DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2 || StoreJob.shouldStoreEncLS2To(ri))))) { Set peerIPs = new MaskedIPSet(getContext(), ri, IP_CLOSE_BYTES); if (!_ipSet.containsAny(peerIPs)) { _ipSet.addAll(peerIPs); @@ -221,7 +227,7 @@ class FloodfillVerifyStoreJob extends JobImpl { } if (_log.shouldLog(Log.WARN)) - _log.warn("No other peers to verify floodfill with, using the one we sent to"); + _log.warn(getJobId() + ": No other peers to verify floodfill with, using the one we sent to"); return _sentTo; } @@ -259,60 +265,78 @@ class FloodfillVerifyStoreJob extends JobImpl { private class VerifyReplyJob extends JobImpl implements ReplyJob { private I2NPMessage _message; + public VerifyReplyJob(RouterContext ctx) { super(ctx); } + public String getName() { return "Handle floodfill verification reply"; } + public void runJob() { - long delay = getContext().clock().now() - _sendTime; + final RouterContext ctx = getContext(); + long delay = ctx.clock().now() - _sendTime; if (_wrappedMessage != null) _wrappedMessage.acked(); _facade.verifyFinished(_key); - if (_message instanceof DatabaseStoreMessage) { + final ProfileManager pm = ctx.profileManager(); + final int 
type = _message.getType(); + if (type == DatabaseStoreMessage.MESSAGE_TYPE) { // Verify it's as recent as the one we sent DatabaseStoreMessage dsm = (DatabaseStoreMessage)_message; - boolean success = dsm.getEntry().getDate() >= _published; + DatabaseEntry entry = dsm.getEntry(); + long newDate; + boolean success; + if (_isLS2 && + entry.getType() != DatabaseEntry.KEY_TYPE_ROUTERINFO && + entry.getType() != DatabaseEntry.KEY_TYPE_LEASESET) { + LeaseSet2 ls2 = (LeaseSet2) entry; + success = ls2.getPublished() >= _published; + } else { + success = entry.getDate() >= _published; + } if (success) { // store ok, w00t! - getContext().profileManager().dbLookupSuccessful(_target, delay); + pm.dbLookupSuccessful(_target, delay); if (_sentTo != null) - getContext().profileManager().dbStoreSuccessful(_sentTo); - getContext().statManager().addRateData("netDb.floodfillVerifyOK", delay); + pm.dbStoreSuccessful(_sentTo); + ctx.statManager().addRateData("netDb.floodfillVerifyOK", delay); if (_log.shouldLog(Log.INFO)) - _log.info("Verify success for " + _key); + _log.info(getJobId() + ": Verify success for " + _key); if (_isRouterInfo) _facade.routerInfoPublishSuccessful(); return; } - if (_log.shouldLog(Log.WARN)) - _log.warn("Verify failed (older) for " + _key); - if (_log.shouldLog(Log.INFO)) - _log.info("Rcvd older data: " + dsm.getEntry()); - } else if (_message instanceof DatabaseSearchReplyMessage) { + if (_log.shouldWarn()) { + _log.warn(getJobId() + ": Verify failed (older) for " + _key); + if (_log.shouldInfo()) + _log.info(getJobId() + ": Rcvd older data: " + dsm.getEntry()); + } + } else if (type == DatabaseSearchReplyMessage.MESSAGE_TYPE) { DatabaseSearchReplyMessage dsrm = (DatabaseSearchReplyMessage) _message; // assume 0 old, all new, 0 invalid, 0 dup - getContext().profileManager().dbLookupReply(_target, 0, + pm.dbLookupReply(_target, 0, dsrm.getNumReplies(), 0, 0, delay); if (_log.shouldLog(Log.WARN)) - _log.warn("Verify failed (DSRM) for " + _key); + 
_log.warn(getJobId() + ": Verify failed (DSRM) for " + _key); // only for RI... LS too dangerous? if (_isRouterInfo) - getContext().jobQueue().addJob(new SingleLookupJob(getContext(), dsrm)); + ctx.jobQueue().addJob(new SingleLookupJob(ctx, dsrm)); } // store failed, boo, hiss! // blame the sent-to peer, but not the verify peer if (_sentTo != null) - getContext().profileManager().dbStoreFailed(_sentTo); + pm.dbStoreFailed(_sentTo); // Blame the verify peer also. // We must use dbLookupFailed() or dbStoreFailed(), neither of which is exactly correct, // but we have to use one of them to affect the FloodfillPeerSelector ordering. // If we don't do this we get stuck using the same verify peer every time even // though it is the real problem. if (_target != null && !_target.equals(_sentTo)) - getContext().profileManager().dbLookupFailed(_target); - getContext().statManager().addRateData("netDb.floodfillVerifyFail", delay); + pm.dbLookupFailed(_target); + ctx.statManager().addRateData("netDb.floodfillVerifyFail", delay); resend(); } + public void setMessage(I2NPMessage message) { _message = message; } } @@ -327,11 +351,30 @@ class FloodfillVerifyStoreJob extends JobImpl { private void resend() { DatabaseEntry ds = _facade.lookupLocally(_key); if (ds != null) { + // By the time we get here, a minute or more after the store started, + // we may have already started a new store + // (probably, for LS, and we don't verify by default for RI) + long newDate; + if (_isLS2 && + ds.getType() != DatabaseEntry.KEY_TYPE_ROUTERINFO && + ds.getType() != DatabaseEntry.KEY_TYPE_LEASESET) { + LeaseSet2 ls2 = (LeaseSet2) ds; + newDate = ls2.getPublished(); + } else { + newDate = ds.getDate(); + } + if (newDate > _published) { + if (_log.shouldInfo()) + _log.info(getJobId() + ": Verify failed, but new store already happened for: " + _key); + return; + } Set toSkip = new HashSet(2); if (_sentTo != null) toSkip.add(_sentTo); if (_target != null) toSkip.add(_target); + if (_log.shouldWarn()) 
+ _log.warn(getJobId() + ": Verify failed, starting new store for: " + _key); _facade.sendStore(_key, ds, null, null, FloodfillNetworkDatabaseFacade.PUBLISH_TIMEOUT, toSkip); } } @@ -350,7 +393,7 @@ class FloodfillVerifyStoreJob extends JobImpl { // getContext().profileManager().dbStoreFailed(_sentTo); getContext().statManager().addRateData("netDb.floodfillVerifyTimeout", getContext().clock().now() - _sendTime); if (_log.shouldLog(Log.WARN)) - _log.warn("Verify timed out for: " + _key); + _log.warn(getJobId() + ": Verify timed out for: " + _key); if (_ignore.size() < MAX_PEERS_TO_TRY) { // Don't resend, simply rerun FVSJ.this inline and // chose somebody besides _target for verification diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java index c0d854298b..31a5c775e0 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/HandleFloodfillDatabaseStoreMessageJob.java @@ -14,6 +14,7 @@ import java.util.Date; import net.i2p.data.DatabaseEntry; import net.i2p.data.Hash; import net.i2p.data.LeaseSet; +import net.i2p.data.LeaseSet2; import net.i2p.data.TunnelId; import net.i2p.data.router.RouterAddress; import net.i2p.data.router.RouterIdentity; @@ -113,10 +114,19 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl { } else if (match.getEarliestLeaseDate() < ls.getEarliestLeaseDate()) { wasNew = true; // If it is in our keyspace and we are talking to it - - if (match.getReceivedAsPublished()) ls.setReceivedAsPublished(true); + } else if (type != DatabaseEntry.KEY_TYPE_LEASESET && + match.getType() != DatabaseEntry.KEY_TYPE_LEASESET) { + LeaseSet2 ls2 = (LeaseSet2) ls; + LeaseSet2 match2 = (LeaseSet2) match; + if (match2.getPublished() < ls2.getPublished()) { + wasNew = true; + if 
(match.getReceivedAsPublished()) + ls.setReceivedAsPublished(true); + } else { + wasNew = false; + } } else { wasNew = false; // The FloodOnlyLookupSelector goes away after the first good reply @@ -228,7 +238,7 @@ class HandleFloodfillDatabaseStoreMessageJob extends JobImpl { return; } long floodBegin = System.currentTimeMillis(); - _facade.flood(_message.getEntry()); + _facade.flood(entry); // ERR: see comment in HandleDatabaseLookupMessageJob regarding hidden mode //else if (!_message.getRouterInfo().isHidden()) long floodEnd = System.currentTimeMillis(); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java index 02061e4635..d0c323bb2c 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/KademliaNetworkDatabaseFacade.java @@ -935,8 +935,8 @@ public abstract class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacad if (fkc != null) { boolean validFamily = fkc.verify(routerInfo); if (!validFamily) { - if (_log.shouldWarn()) - _log.warn("Bad family sig: " + routerInfo.getHash()); + if (_log.shouldInfo()) + _log.info("Bad family sig: " + routerInfo.getHash()); } // todo store in RI } diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java index ca8cd80f4a..7a2e1b6d8f 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/RepublishLeaseSetJob.java @@ -87,6 +87,9 @@ class RepublishLeaseSetJob extends JobImpl { requeue(RETRY_DELAY + getContext().random().nextInt(RETRY_DELAY)); } + /** + * @return last attempted publish time, or 0 if never + */ public long lastPublished() { return _lastPublished; } diff --git 
a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java index 5adc238f54..aaedd309bf 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/StoreJob.java @@ -53,7 +53,7 @@ abstract class StoreJob extends JobImpl { private final static int PARALLELIZATION = 4; // how many sent at a time private final static int REDUNDANCY = 4; // we want the data sent to 6 peers private final static int STORE_PRIORITY = OutNetMessage.PRIORITY_MY_NETDB_STORE; - + /** * Send a data structure to the floodfills * @@ -89,9 +89,12 @@ abstract class StoreJob extends JobImpl { else _connectMask = ConnectChecker.ANY_V4; } + if (_log.shouldLog(Log.DEBUG)) + _log.debug(getJobId() + ": New store job for " + data, new Exception("I did it")); } public String getName() { return "Kademlia NetDb Store";} + public void runJob() { sendNext(); } @@ -194,6 +197,12 @@ abstract class StoreJob extends JobImpl { _log.info(getJobId() + ": Skipping old router " + peer); _state.addSkipped(peer); skipped++; + } else if (type == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2 && + !shouldStoreEncLS2To((RouterInfo)ds)) { + if (_log.shouldInfo()) + _log.info(getJobId() + ": Skipping router that doesn't support LS2 " + peer); + _state.addSkipped(peer); + skipped++; } else if (isls2 && !shouldStoreLS2To((RouterInfo)ds)) { if (_log.shouldLog(Log.INFO)) @@ -410,7 +419,7 @@ abstract class StoreJob extends JobImpl { StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration); if (_log.shouldLog(Log.DEBUG)) - _log.debug("sending store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + msg); + _log.debug(getJobId() + ": sending store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + msg); getContext().messageRegistry().registerPending(selector, onReply, onFail); 
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0), null, to); } else { @@ -476,7 +485,7 @@ abstract class StoreJob extends JobImpl { StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration); if (_log.shouldLog(Log.DEBUG)) { - _log.debug("sending encrypted store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + sent); + _log.debug(getJobId() + ": sending encrypted store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + sent); } getContext().messageRegistry().registerPending(selector, onReply, onFail); getContext().tunnelDispatcher().dispatchOutbound(sent, outTunnel.getSendTunnelId(0), null, to); @@ -512,7 +521,7 @@ abstract class StoreJob extends JobImpl { public static final String MIN_STORE_VERSION = "0.9.28"; /** - * Is it too old? + * Is it new enough? * @since 0.9.33 */ static boolean shouldStoreTo(RouterInfo ri) { @@ -524,7 +533,7 @@ abstract class StoreJob extends JobImpl { public static final String MIN_STORE_LS2_VERSION = "0.9.38"; /** - * Is it too old? + * Is it new enough? * @since 0.9.38 */ static boolean shouldStoreLS2To(RouterInfo ri) { @@ -532,6 +541,21 @@ abstract class StoreJob extends JobImpl { return VersionComparator.comp(v, MIN_STORE_LS2_VERSION) >= 0; } + /** + * Was supported in 38, but they're now sigtype 11 which wasn't added until 39 + * @since 0.9.39 + */ + public static final String MIN_STORE_ENCLS2_VERSION = "0.9.39"; + + /** + * Is it new enough? 
+     * @since 0.9.39 +     */ +    static boolean shouldStoreEncLS2To(RouterInfo ri) { +        String v = ri.getVersion(); +        return VersionComparator.comp(v, MIN_STORE_ENCLS2_VERSION) >= 0; +    } +     /**      * Called after sending a dbStore to a peer successfully,      * marking the store as successful @@ -575,7 +599,7 @@ abstract class StoreJob extends JobImpl {              if ( (_sendThrough != null) && (_msgSize > 0) ) {                 if (_log.shouldDebug()) -                    _log.debug("sent a " + _msgSize + " byte netDb message through tunnel " + _sendThrough + " after " + howLong); +                    _log.debug(StoreJob.this.getJobId() + ": sent a " + _msgSize + " byte netDb message through tunnel " + _sendThrough + " after " + howLong);                 for (int i = 0; i < _sendThrough.getLength(); i++)                     getContext().profileManager().tunnelDataPushed(_sendThrough.getPeer(i), howLong, _msgSize);                 _sendThrough.incrementVerifiedBytesTransferred(_msgSize); @@ -634,10 +658,11 @@ abstract class StoreJob extends JobImpl {      * Send was totally successful      */     protected void succeed() { -        if (_log.shouldLog(Log.INFO)) +        if (_log.shouldInfo()) {             _log.info(getJobId() + ": Succeeded sending key " + _state.getTarget()); -        if (_log.shouldLog(Log.DEBUG)) -            _log.debug(getJobId() + ": State of successful send: " + _state); +            if (_log.shouldDebug()) +                _log.debug(getJobId() + ": State of successful send: " + _state); +        }         if (_onSuccess != null)             getContext().jobQueue().addJob(_onSuccess);         _state.complete(true); @@ -648,10 +673,11 @@ abstract class StoreJob extends JobImpl {      * Send totally failed      */     protected void fail() { -        if (_log.shouldLog(Log.INFO)) +        if (_log.shouldInfo()) {             _log.info(getJobId() + ": Failed sending key " + _state.getTarget()); -        if (_log.shouldLog(Log.DEBUG)) -            _log.debug(getJobId() + ": State of failed send: " + _state, new Exception("Who failed me?")); +            if (_log.shouldDebug()) +                _log.debug(getJobId() + ": State of failed send: " + _state, new Exception("Who failed me?")); +        }         if (_onFailure != null)             getContext().jobQueue().addJob(_onFailure); 
_state.complete(true); diff --git a/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java b/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java index 5e4fe39884..6c0007f9bc 100644 --- a/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java +++ b/router/java/src/net/i2p/router/networkdb/kademlia/TransientDataStore.java @@ -18,6 +18,7 @@ import java.util.Set; import net.i2p.data.DatabaseEntry; import net.i2p.data.Hash; import net.i2p.data.LeaseSet; +import net.i2p.data.LeaseSet2; import net.i2p.data.router.RouterInfo; import net.i2p.router.RouterContext; import net.i2p.util.Log; @@ -153,17 +154,29 @@ class TransientDataStore implements DataStore { LeaseSet ls = (LeaseSet)data; if (old != null) { LeaseSet ols = (LeaseSet)old; - if (ls.getEarliestLeaseDate() < ols.getEarliestLeaseDate()) { + long oldDate, newDate; + if (type != DatabaseEntry.KEY_TYPE_LEASESET && + ols.getType() != DatabaseEntry.KEY_TYPE_LEASESET) { + LeaseSet2 ls2 = (LeaseSet2) ls; + LeaseSet2 ols2 = (LeaseSet2) ols; + oldDate = ols2.getPublished(); + newDate = ls2.getPublished(); + } else { + oldDate = ols.getEarliestLeaseDate(); + newDate = ls.getEarliestLeaseDate(); + } + + if (newDate < oldDate) { if (_log.shouldLog(Log.INFO)) - _log.info("Almost clobbered an old leaseSet! " + key + ": [old expires " + new Date(ols.getEarliestLeaseDate()) + - " new on " + new Date(ls.getEarliestLeaseDate()) + ']'); - } else if (ls.getEarliestLeaseDate() == ols.getEarliestLeaseDate()) { + _log.info("Almost clobbered an old leaseSet! 
" + key + ": [old " + new Date(oldDate) + + " new " + new Date(newDate) + ']'); + } else if (newDate == oldDate) { if (_log.shouldLog(Log.INFO)) _log.info("Duplicate " + key); } else { if (_log.shouldLog(Log.INFO)) { - _log.info("Updated old leaseSet " + key + ": [old expires " + new Date(ols.getEarliestLeaseDate()) + - " new on " + new Date(ls.getEarliestLeaseDate()) + ']'); + _log.info("Updated old leaseSet " + key + ": [old " + new Date(oldDate) + + " new " + new Date(newDate) + ']'); if (_log.shouldLog(Log.DEBUG)) _log.debug("RAP? " + ls.getReceivedAsPublished() + " RAR? " + ls.getReceivedAsReply()); }