NetDB: Iterative search improvements

- Pass DSRM hashes through IMD to ILJ for client tunnels too
- Query unknown DSRM hashes in ILJ without looking up
  when using client tunnels; look up after querying
  when using expl. tunnels to speed things up
- Don't look up banlisted hashes
- Check 'from' hash in DSRM against list of peers sent to
- Don't query an unknown peer through a zero-hop OB tunnel
- Log tweaks
Extend expiration of returned message in IMD
This commit is contained in:
zzz
2014-04-06 15:18:16 +00:00
parent e097a1caeb
commit 3b1e030b39
5 changed files with 90 additions and 18 deletions

View File

@ -1,8 +1,11 @@
2014-04-06 zzz
* NetDB: Iterative search improvements
2014-04-05 zzz
* I2PTunnel:
- Add server option for unique local address per-client
- Fix changing outproxy without stopping tunnel (ticket #1164)
- Fix add-to-addressbook hostname link in i2ptunnel (ticket #688)
* NetDB: Skip key cert LS stores and verifies for floodfills that don't support them
2014-04-01 zzz

View File

@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 3;
public final static long BUILD = 4;
/** for example "-test" */
public final static String EXTRA = "";

View File

@ -3,6 +3,7 @@ package net.i2p.router.networkdb.kademlia;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.util.Log;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
@ -22,19 +23,26 @@ import net.i2p.router.RouterContext;
* @since 0.8.9
*/
class IterativeLookupJob extends JobImpl {
private final Log _log;
private final DatabaseSearchReplyMessage _dsrm;
private final IterativeSearchJob _search;
public IterativeLookupJob(RouterContext ctx, DatabaseSearchReplyMessage dsrm, IterativeSearchJob search) {
super(ctx);
_log = ctx.logManager().getLog(IterativeLookupJob.class);
_dsrm = dsrm;
_search = search;
}
public void runJob() {
// TODO - dsrm.getFromHash() can't be trusted - check against the list of
// those we sent the search to in _search ?
Hash from = _dsrm.getFromHash();
// dsrm.getFromHash() can't be trusted - check against the list of
// those we sent the search to in _search
if (!_search.wasQueried(from)) {
if (_log.shouldLog(Log.WARN))
_log.warn(_search.getJobId() + ": ILJ DSRM from unqueried peer: " + _dsrm);
return;
}
// Chase the hashes from the reply
// 255 max, see comments in SingleLookupJob
@ -54,15 +62,35 @@ class IterativeLookupJob extends JobImpl {
invalidPeers++;
continue;
}
if (getContext().banlist().isBanlistedForever(peer)) {
oldPeers++;
continue;
}
RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
if (ri == null) {
// get the RI from the peer that told us about it
getContext().jobQueue().addJob(new IterativeFollowupJob(getContext(), peer, from, _search));
// Take it on faith that it's ff to speed things up, we don't need the RI
// to query it.
// Zero-hop outbound tunnel will be failed in ISJ.sendQuery()
_search.newPeerToTry(peer);
if (_search.getFromHash() == null) {
// get the RI from the peer that told us about it
// Only if original search used expl. tunnels
getContext().jobQueue().addJob(new SingleSearchJob(getContext(), peer, from));
} else {
// don't look it up as we don't have a good way to do it securely...
// add to expl. queue to look up later? no, probably not safe either
}
newPeers++;
} else if (ri.getPublished() < getContext().clock().now() - 60*60*1000 ||
!FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
// get an updated RI from the (now ff?) peer
getContext().jobQueue().addJob(new IterativeFollowupJob(getContext(), peer, peer, _search));
// Only if original search used expl. tunnels
if (_search.getFromHash() == null) {
getContext().jobQueue().addJob(new IterativeFollowupJob(getContext(), peer, peer, _search));
} else {
// for now, don't believe him, don't call newPeerToTry()
// is IFJ safe if we use the client tunnels?
}
oldPeers++;
} else {
// add it to the sorted queue
@ -73,6 +101,8 @@ class IterativeLookupJob extends JobImpl {
oldPeers++;
}
}
if (_log.shouldLog(Log.INFO))
_log.info(_search.getJobId() + ": ILJ DSRM processed " + newPeers + '/' + oldPeers + '/' + invalidPeers + " new/old/invalid hashes");
long timeSent = _search.timeSent(from);
// assume 0 dup
if (timeSent > 0) {

View File

@ -252,7 +252,6 @@ class IterativeSearchJob extends FloodSearchJob {
* Send a DLM to the peer
*/
private void sendQuery(Hash peer) {
DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
TunnelManagerFacade tm = getContext().tunnelManager();
TunnelInfo outTunnel;
TunnelInfo replyTunnel;
@ -281,11 +280,22 @@ class IterativeSearchJob extends FloodSearchJob {
// if it happens to be closest to itself and we are using zero-hop exploratory tunnels.
// If we don't, the OutboundMessageDistributor ends up logging errors for
// not being able to send to the floodfill, if we don't have an older netdb entry.
if (outTunnel.getLength() <= 1 && peer.equals(_key)) {
failed(peer, false);
return;
if (outTunnel.getLength() <= 1) {
if (peer.equals(_key)) {
failed(peer, false);
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": not doing zero-hop self-lookup of " + peer);
return;
}
if (_facade.lookupLocallyWithoutValidation(peer) == null) {
failed(peer, false);
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": not doing zero-hop lookup to unknown " + peer);
return;
}
}
DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setMessageExpiration(getContext().clock().now() + SINGLE_SEARCH_MSG_TIME);
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
@ -386,11 +396,17 @@ class IterativeSearchJob extends FloodSearchJob {
if (peer.equals(getContext().routerHash()) ||
peer.equals(_key))
return;
if (getContext().banlist().isBanlistedForever(peer)) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": banlisted peer from DSRM " + peer);
return;
}
RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
if (!FloodfillNetworkDatabaseFacade.isFloodfill(ri))
return;
if (getContext().banlist().isBanlistedForever(peer))
if (ri != null && !FloodfillNetworkDatabaseFacade.isFloodfill(ri)) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": non-ff peer from DSRM " + peer);
return;
}
synchronized (this) {
if (_failedPeers.contains(peer) ||
_unheardFrom.contains(peer))
@ -399,10 +415,28 @@ class IterativeSearchJob extends FloodSearchJob {
return; // already in the list
}
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": new peer from DSRM " + peer);
_log.info(getJobId() + ": new peer from DSRM: known? " + (ri != null) + ' ' + peer);
retry();
}
/**
* Hash of the dest this query is from
* @return null for router
* @since 0.9.13
*/
public Hash getFromHash() {
return _fromLocalDest;
}
/**
* Did we send a request to this peer?
* @since 0.9.13
*/
public boolean wasQueried(Hash peer) {
synchronized (this) {
return _unheardFrom.contains(peer) || _failedPeers.contains(peer);
}
}
/**
* When did we send the query to the peer?

View File

@ -69,6 +69,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
// LS or RI and client or expl., so that we can safely follow references
// in a reply to a LS lookup over client tunnels.
// ILJ would also have to follow references via client tunnels
/****
DatabaseSearchReplyMessage orig = (DatabaseSearchReplyMessage) msg;
if (orig.getNumReplies() > 0) {
if (_log.shouldLog(Log.INFO))
@ -78,6 +79,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
newMsg.setSearchKey(orig.getSearchKey());
msg = newMsg;
}
****/
} else if ( (_client != null) &&
(type == DatabaseStoreMessage.MESSAGE_TYPE)) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage) msg;
@ -164,8 +166,8 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
_log.warn("no outbound tunnel to send the client message for " + _client + ": " + msg);
return;
}
if (_log.shouldLog(Log.INFO))
_log.info("distributing inbound tunnel message back out " + out
if (_log.shouldLog(Log.DEBUG))
_log.debug("distributing IB tunnel msg type " + type + " back out " + out
+ " targetting " + target);
TunnelId outId = out.getSendTunnelId(0);
if (outId == null) {
@ -174,8 +176,9 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
+ " failing to distribute " + msg);
return;
}
if (msg.getMessageExpiration() < _context.clock().now() + 10*1000)
msg.setMessageExpiration(_context.clock().now() + 10*1000);
long exp = _context.clock().now() + 20*1000;
if (msg.getMessageExpiration() < exp)
msg.setMessageExpiration(exp);
_context.tunnelDispatcher().dispatchOutbound(msg, outId, tunnel, target);
}
}
@ -250,6 +253,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
// in a reply to a LS lookup over client tunnels.
// ILJ would also have to follow references via client tunnels
DatabaseSearchReplyMessage orig = (DatabaseSearchReplyMessage) data;
/****
if (orig.getNumReplies() > 0) {
if (_log.shouldLog(Log.INFO))
_log.info("Removing replies from a garlic DSRM down a tunnel for " + _client + ": " + data);
@ -258,6 +262,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
newMsg.setSearchKey(orig.getSearchKey());
orig = newMsg;
}
****/
_context.inNetMessagePool().add(orig, null, null);
} else if (type == DataMessage.MESSAGE_TYPE) {
// a data message targetting the local router is how we send load tests (real