NetDB: Send RI lookups directly to the floodfill if

we are already connected to him
(from ISJ only; does not affect exploration, verifies, LSes, ...)
This commit is contained in:
zzz
2015-03-16 22:40:25 +00:00
parent db25eff74a
commit d22b05e114
4 changed files with 55 additions and 12 deletions

View File

@@ -1,5 +1,10 @@
2015-03-16 zzz 2015-03-16 zzz
* Apache Tomcat 6.0.43 * Apache Tomcat 6.0.43
* NetDB: Send RI lookups directly to the floodfill if
we are already connected to him
* Router:
- Republish RI early if capabilities change
- Increase exploratory tunnel quantity if floodfill
* Throttle: Reject tunnels based on job lag * Throttle: Reject tunnels based on job lag
2015-03-15 zzz 2015-03-15 zzz

View File

@@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */ /** deprecated */
public final static String ID = "Monotone"; public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION; public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 4; public final static long BUILD = 5;
/** for example "-test" */ /** for example "-test" */
public final static String EXTRA = ""; public final static String EXTRA = "";

View File

@@ -66,6 +66,8 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
_context.statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); _context.statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); _context.statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l }); _context.statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
// for ISJ
_context.statManager().createRateStat("netDb.RILookupDirect", "Was an iterative RI lookup sent directly?", "NetworkDatabase", new long[] { 60*60*1000 });
} }
@Override @Override

View File

@@ -127,6 +127,7 @@ class IterativeSearchJob extends FloodSearchJob {
_fromLocalDest = fromLocalDest; _fromLocalDest = fromLocalDest;
if (fromLocalDest != null && !isLease && _log.shouldLog(Log.WARN)) if (fromLocalDest != null && !isLease && _log.shouldLog(Log.WARN))
_log.warn("Search for RI " + key + " down client tunnel " + fromLocalDest, new Exception()); _log.warn("Search for RI " + key + " down client tunnel " + fromLocalDest, new Exception());
// all createRateStat in FNDF
} }
@Override @Override
@@ -260,9 +261,11 @@ class IterativeSearchJob extends FloodSearchJob {
*/ */
private void sendQuery(Hash peer) { private void sendQuery(Hash peer) {
TunnelManagerFacade tm = getContext().tunnelManager(); TunnelManagerFacade tm = getContext().tunnelManager();
RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
TunnelInfo outTunnel; TunnelInfo outTunnel;
TunnelInfo replyTunnel; TunnelInfo replyTunnel;
boolean isClientReplyTunnel; boolean isClientReplyTunnel;
boolean isDirect;
if (_fromLocalDest != null) { if (_fromLocalDest != null) {
outTunnel = tm.selectOutboundTunnel(_fromLocalDest, peer); outTunnel = tm.selectOutboundTunnel(_fromLocalDest, peer);
if (outTunnel == null) if (outTunnel == null)
@@ -271,12 +274,26 @@ class IterativeSearchJob extends FloodSearchJob {
isClientReplyTunnel = replyTunnel != null; isClientReplyTunnel = replyTunnel != null;
if (!isClientReplyTunnel) if (!isClientReplyTunnel)
replyTunnel = tm.selectInboundExploratoryTunnel(peer); replyTunnel = tm.selectInboundExploratoryTunnel(peer);
isDirect = false;
} else if ((!_isLease) && ri != null && getContext().commSystem().isEstablished(peer)) {
// If it's a RI lookup, not from a client, and we're already connected, just ask directly
// This also saves the ElG encryption for us and the decryption for the ff
// There's no anonymity reason to use an expl. tunnel... the main reason
// is to limit connections to the ffs. But if we're already connected,
// do it the fast and easy way.
outTunnel = null;
replyTunnel = null;
isClientReplyTunnel = false;
isDirect = true;
getContext().statManager().addRateData("netDb.RILookupDirect", 1);
} else { } else {
outTunnel = tm.selectOutboundExploratoryTunnel(peer); outTunnel = tm.selectOutboundExploratoryTunnel(peer);
replyTunnel = tm.selectInboundExploratoryTunnel(peer); replyTunnel = tm.selectInboundExploratoryTunnel(peer);
isClientReplyTunnel = false; isClientReplyTunnel = false;
isDirect = false;
getContext().statManager().addRateData("netDb.RILookupDirect", 0);
} }
if ( (replyTunnel == null) || (outTunnel == null) ) { if ((!isDirect) && (replyTunnel == null || outTunnel == null)) {
failed(); failed();
return; return;
} }
@@ -287,7 +304,7 @@ class IterativeSearchJob extends FloodSearchJob {
// if it happens to be closest to itself and we are using zero-hop exploratory tunnels. // if it happens to be closest to itself and we are using zero-hop exploratory tunnels.
// If we don't, the OutboundMessageDistributor ends up logging errors for // not being able to send to the floodfill, if we don't have an older netdb entry.
// not being able to send to the floodfill, if we don't have an older netdb entry. // not being able to send to the floodfill, if we don't have an older netdb entry.
if (outTunnel.getLength() <= 1) { if (outTunnel != null && outTunnel.getLength() <= 1) {
if (peer.equals(_key)) { if (peer.equals(_key)) {
failed(peer, false); failed(peer, false);
if (_log.shouldLog(Log.WARN)) if (_log.shouldLog(Log.WARN))
@@ -303,9 +320,13 @@ class IterativeSearchJob extends FloodSearchJob {
} }
DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true); DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
dlm.setFrom(replyTunnel.getPeer(0)); if (isDirect) {
dlm.setFrom(getContext().routerHash());
} else {
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
}
dlm.setMessageExpiration(getContext().clock().now() + SINGLE_SEARCH_MSG_TIME); dlm.setMessageExpiration(getContext().clock().now() + SINGLE_SEARCH_MSG_TIME);
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
dlm.setSearchKey(_key); dlm.setSearchKey(_key);
dlm.setSearchType(_isLease ? DatabaseLookupMessage.Type.LS : DatabaseLookupMessage.Type.RI); dlm.setSearchType(_isLease ? DatabaseLookupMessage.Type.LS : DatabaseLookupMessage.Type.RI);
@@ -317,16 +338,19 @@ class IterativeSearchJob extends FloodSearchJob {
_log.info(getJobId() + ": ISJ try " + tries + " for " + _log.info(getJobId() + ": ISJ try " + tries + " for " +
(_isLease ? "LS " : "RI ") + (_isLease ? "LS " : "RI ") +
_key + " to " + peer + _key + " to " + peer +
" direct? " + isDirect +
" reply via client tunnel? " + isClientReplyTunnel); " reply via client tunnel? " + isClientReplyTunnel);
} }
long now = getContext().clock().now(); long now = getContext().clock().now();
_sentTime.put(peer, Long.valueOf(now)); _sentTime.put(peer, Long.valueOf(now));
I2NPMessage outMsg = null; I2NPMessage outMsg = null;
if (_isLease || getContext().getProperty(PROP_ENCRYPT_RI, DEFAULT_ENCRYPT_RI)) { if (isDirect) {
// never wrap
} else if (_isLease || getContext().getProperty(PROP_ENCRYPT_RI, DEFAULT_ENCRYPT_RI)) {
// Full ElG is fairly expensive so only do it for LS lookups // Full ElG is fairly expensive so only do it for LS lookups
// and for RI lookups on fast boxes.
// if we have the ff RI, garlic encrypt it // if we have the ff RI, garlic encrypt it
RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
if (ri != null) { if (ri != null) {
// request encrypted reply // request encrypted reply
if (DatabaseLookupMessage.supportsEncryptedReplies(ri)) { if (DatabaseLookupMessage.supportsEncryptedReplies(ri)) {
@@ -355,7 +379,19 @@ class IterativeSearchJob extends FloodSearchJob {
} }
if (outMsg == null) if (outMsg == null)
outMsg = dlm; outMsg = dlm;
getContext().tunnelDispatcher().dispatchOutbound(outMsg, outTunnel.getSendTunnelId(0), peer); if (isDirect) {
OutNetMessage m = new OutNetMessage(getContext(), outMsg, outMsg.getMessageExpiration(),
OutNetMessage.PRIORITY_MY_NETDB_LOOKUP, ri);
// Should always succeed, we are connected already
//m.setOnFailedReplyJob(onFail);
//m.setOnFailedSendJob(onFail);
//m.setOnReplyJob(onReply);
//m.setReplySelector(selector);
//getContext().messageRegistry().registerPending(m);
getContext().commSystem().processMessage(m);
} else {
getContext().tunnelDispatcher().dispatchOutbound(outMsg, outTunnel.getSendTunnelId(0), peer);
}
// The timeout job is always run (never cancelled) // The timeout job is always run (never cancelled)
// Note that the timeout is much shorter than the message expiration (see above) // Note that the timeout is much shorter than the message expiration (see above)
@@ -481,8 +517,8 @@ class IterativeSearchJob extends FloodSearchJob {
_log.info(getJobId() + ": ISJ for " + _key + " failed with " + timeRemaining + " remaining after " + time + _log.info(getJobId() + ": ISJ for " + _key + " failed with " + timeRemaining + " remaining after " + time +
", peers queried: " + tries); ", peers queried: " + tries);
} }
getContext().statManager().addRateData("netDb.failedTime", time, 0); getContext().statManager().addRateData("netDb.failedTime", time);
getContext().statManager().addRateData("netDb.failedRetries", Math.max(0, tries - 1), 0); getContext().statManager().addRateData("netDb.failedRetries", Math.max(0, tries - 1));
for (Job j : _onFailed) { for (Job j : _onFailed) {
getContext().jobQueue().addJob(j); getContext().jobQueue().addJob(j);
} }
@@ -515,8 +551,8 @@ class IterativeSearchJob extends FloodSearchJob {
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": ISJ for " + _key + " successful after " + time + _log.info(getJobId() + ": ISJ for " + _key + " successful after " + time +
", peers queried: " + tries); ", peers queried: " + tries);
getContext().statManager().addRateData("netDb.successTime", time, 0); getContext().statManager().addRateData("netDb.successTime", time);
getContext().statManager().addRateData("netDb.successRetries", tries - 1, 0); getContext().statManager().addRateData("netDb.successRetries", tries - 1);
for (Job j : _onFind) { for (Job j : _onFind) {
getContext().jobQueue().addJob(j); getContext().jobQueue().addJob(j);
} }