NetDB: Fix tunnel selection for verify of encrypted ls2 store

Fix NPE handling lookup of encrypted ls2
This commit is contained in:
zzz
2019-03-04 19:04:42 +00:00
parent 0b2896516e
commit bfafdd34be
3 changed files with 26 additions and 13 deletions

View File

@ -102,7 +102,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
// to avoid anonymity vulnerabilities. // to avoid anonymity vulnerabilities.
// As this is complex, lots of comments follow... // As this is complex, lots of comments follow...
boolean isLocal = getContext().clientManager().isLocal(ls.getDestination()); boolean isLocal = getContext().clientManager().isLocal(ls.getHash());
boolean shouldPublishLocal = isLocal && getContext().clientManager().shouldPublishLeaseSet(_message.getSearchKey()); boolean shouldPublishLocal = isLocal && getContext().clientManager().shouldPublishLeaseSet(_message.getSearchKey());
// Only answer a request for a LeaseSet if it has been published // Only answer a request for a LeaseSet if it has been published

View File

@ -13,6 +13,7 @@ import java.util.Set;
import net.i2p.data.DatabaseEntry; import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.LeaseSet2; import net.i2p.data.LeaseSet2;
import net.i2p.router.Job; import net.i2p.router.Job;
import net.i2p.router.RouterContext; import net.i2p.router.RouterContext;
@ -41,7 +42,8 @@ class FloodfillStoreJob extends StoreJob {
* @param toSkip set of peer hashes of people we don't want to send the data to (e.g. we * @param toSkip set of peer hashes of people we don't want to send the data to (e.g. we
* already know they have it). This can be null. * already know they have it). This can be null.
*/ */
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DatabaseEntry data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) { public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DatabaseEntry data,
Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
super(context, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip); super(context, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip);
_facade = facade; _facade = facade;
} }
@ -60,15 +62,16 @@ class FloodfillStoreJob extends StoreJob {
super.succeed(); super.succeed();
final boolean shouldLog = _log.shouldInfo(); final boolean shouldLog = _log.shouldInfo();
final Hash key = _state.getTarget();
if (_facade.isVerifyInProgress(_state.getTarget())) { if (_facade.isVerifyInProgress(key)) {
if (shouldLog) if (shouldLog)
_log.info("Skipping verify, one already in progress for: " + _state.getTarget()); _log.info("Skipping verify, one already in progress for: " + key);
return; return;
} }
if (getContext().router().gracefulShutdownInProgress()) { if (getContext().router().gracefulShutdownInProgress()) {
if (shouldLog) if (shouldLog)
_log.info("Skipping verify, shutdown in progress for: " + _state.getTarget()); _log.info("Skipping verify, shutdown in progress for: " + key);
return; return;
} }
// Get the time stamp from the data we sent, so the Verify job can make sure that // Get the time stamp from the data we sent, so the Verify job can make sure that
@ -95,11 +98,18 @@ class FloodfillStoreJob extends StoreJob {
try { try {
sentTo = _state.getSuccessful().iterator().next(); sentTo = _state.getSuccessful().iterator().next();
} catch (NoSuchElementException nsee) {} } catch (NoSuchElementException nsee) {}
Job fvsj = new FloodfillVerifyStoreJob(getContext(), _state.getTarget(), Hash client;
if (type == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) {
// get the real client hash
client = ((LeaseSet)data).getDestination().calculateHash();
} else {
client = key;
}
Job fvsj = new FloodfillVerifyStoreJob(getContext(), key, client,
published, type, published, type,
sentTo, _facade); sentTo, _facade);
if (shouldLog) if (shouldLog)
_log.info(getJobId() + ": Succeeded sending key " + _state.getTarget() + _log.info(getJobId() + ": Succeeded sending key " + key +
", queueing verify job " + fvsj.getJobId()); ", queueing verify job " + fvsj.getJobId());
getContext().jobQueue().addJob(fvsj); getContext().jobQueue().addJob(fvsj);
} }

View File

@ -32,7 +32,7 @@ import net.i2p.util.Log;
*/ */
class FloodfillVerifyStoreJob extends JobImpl { class FloodfillVerifyStoreJob extends JobImpl {
private final Log _log; private final Log _log;
private final Hash _key; private final Hash _key, _client;
private Hash _target; private Hash _target;
private final Hash _sentTo; private final Hash _sentTo;
private final FloodfillNetworkDatabaseFacade _facade; private final FloodfillNetworkDatabaseFacade _facade;
@ -54,14 +54,16 @@ class FloodfillVerifyStoreJob extends JobImpl {
/** /**
* Delay a few seconds, then start the verify * Delay a few seconds, then start the verify
* @param client generally the same as key, unless encrypted LS2
* @param published getDate() for RI or LS1, getPublished() for LS2 * @param published getDate() for RI or LS1, getPublished() for LS2
* @param sentTo who to give the credit or blame to, can be null * @param sentTo who to give the credit or blame to, can be null
*/ */
public FloodfillVerifyStoreJob(RouterContext ctx, Hash key, long published, int type, public FloodfillVerifyStoreJob(RouterContext ctx, Hash key, Hash client, long published, int type,
Hash sentTo, FloodfillNetworkDatabaseFacade facade) { Hash sentTo, FloodfillNetworkDatabaseFacade facade) {
super(ctx); super(ctx);
facade.verifyStarted(key); facade.verifyStarted(key);
_key = key; _key = key;
_client = client;
_published = published; _published = published;
_isRouterInfo = type == DatabaseEntry.KEY_TYPE_ROUTERINFO; _isRouterInfo = type == DatabaseEntry.KEY_TYPE_ROUTERINFO;
_isLS2 = !_isRouterInfo && type != DatabaseEntry.KEY_TYPE_LEASESET; _isLS2 = !_isRouterInfo && type != DatabaseEntry.KEY_TYPE_LEASESET;
@ -105,12 +107,13 @@ class FloodfillVerifyStoreJob extends JobImpl {
replyTunnelInfo = getContext().tunnelManager().selectInboundExploratoryTunnel(_target); replyTunnelInfo = getContext().tunnelManager().selectInboundExploratoryTunnel(_target);
isInboundExploratory = true; isInboundExploratory = true;
} else { } else {
replyTunnelInfo = getContext().tunnelManager().selectInboundTunnel(_key, _target); replyTunnelInfo = getContext().tunnelManager().selectInboundTunnel(_client, _target);
isInboundExploratory = false; isInboundExploratory = false;
} }
if (replyTunnelInfo == null) { if (replyTunnelInfo == null) {
if (_log.shouldLog(Log.WARN)) if (_log.shouldLog(Log.WARN))
_log.warn("No inbound tunnels to get a reply from!"); _log.warn("No inbound tunnels to get a reply from!");
_facade.verifyFinished(_key);
return; return;
} }
DatabaseLookupMessage lookup = buildLookup(replyTunnelInfo); DatabaseLookupMessage lookup = buildLookup(replyTunnelInfo);
@ -122,7 +125,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
if (_isRouterInfo || getContext().keyRing().get(_key) != null) if (_isRouterInfo || getContext().keyRing().get(_key) != null)
outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(_target); outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(_target);
else else
outTunnel = getContext().tunnelManager().selectOutboundTunnel(_key, _target); outTunnel = getContext().tunnelManager().selectOutboundTunnel(_client, _target);
if (outTunnel == null) { if (outTunnel == null) {
if (_log.shouldLog(Log.WARN)) if (_log.shouldLog(Log.WARN))
_log.warn("No outbound tunnels to verify a store"); _log.warn("No outbound tunnels to verify a store");
@ -144,7 +147,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
if (isInboundExploratory) { if (isInboundExploratory) {
sess = MessageWrapper.generateSession(getContext()); sess = MessageWrapper.generateSession(getContext());
} else { } else {
sess = MessageWrapper.generateSession(getContext(), _key); sess = MessageWrapper.generateSession(getContext(), _client);
if (sess == null) { if (sess == null) {
if (_log.shouldLog(Log.WARN)) if (_log.shouldLog(Log.WARN))
_log.warn("No SKM to reply to"); _log.warn("No SKM to reply to");
@ -160,7 +163,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
if (_isRouterInfo) if (_isRouterInfo)
fromKey = null; fromKey = null;
else else
fromKey = _key; fromKey = _client;
_wrappedMessage = MessageWrapper.wrap(getContext(), lookup, fromKey, peer); _wrappedMessage = MessageWrapper.wrap(getContext(), lookup, fromKey, peer);
if (_wrappedMessage == null) { if (_wrappedMessage == null) {
if (_log.shouldLog(Log.WARN)) if (_log.shouldLog(Log.WARN))