forked from I2P_Developers/i2p.i2p
NetDB: ECIES router support (proposal 156):
Support sending encrypted lookups and stores to ECIES routers
Support requesting AEAD replies to ECIES routers
Encrypt RI lookups when using ECIES even on slow machines
Switch back to RatchetSKM
Don't schedule ack timer for router SKM
Reduce getContext() calls
GMB null check cleanup
MessageWrapper javadoc clarifications
Log tweaks
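For orientation, the compatibility rule these changes apply in ExploreJob, FloodfillVerifyStoreJob and IterativeSearchJob is roughly: request an AEAD (ratchet) reply only when both our router and the peer are ECIES-X25519, fall back to an AES tag reply only when both sides are ElGamal, and otherwise skip the encrypted-reply request. A minimal standalone sketch of that decision for the router-SKM (exploratory) path follows; EncType, ReplyType and the method name here are local stand-ins for illustration, not the actual net.i2p classes.

// Illustration only: EncType and ReplyType below are local stand-ins, not the
// real net.i2p.crypto.EncType; the rule mirrors the exploratory (router SKM)
// branch added in FloodfillVerifyStoreJob in this commit.
public class ReplyTypeSketch {

    enum EncType { ELGAMAL_2048, ECIES_X25519 }

    enum ReplyType { AEAD_RATCHET, AES_TAG, NONE }

    // Ratchet (AEAD) replies need ECIES-X25519 on both ends; AES tag replies
    // need ElGamal on both ends; anything else gets no encrypted reply request.
    static ReplyType selectReplyType(EncType ours, EncType peer) {
        if (ours == EncType.ECIES_X25519 && peer == EncType.ECIES_X25519)
            return ReplyType.AEAD_RATCHET;
        if (ours == EncType.ELGAMAL_2048 && peer == EncType.ELGAMAL_2048)
            return ReplyType.AES_TAG;
        return ReplyType.NONE;
    }

    public static void main(String[] args) {
        // ECIES router asking an ElGamal floodfill: no encrypted reply requested.
        System.out.println(selectReplyType(EncType.ECIES_X25519, EncType.ELGAMAL_2048));
        // Both sides ECIES: request an AEAD (ratchet) reply.
        System.out.println(selectReplyType(EncType.ECIES_X25519, EncType.ECIES_X25519));
    }
}

The client-keyed paths in the diff allow more combinations (an ElGamal router can still request ratchet replies on behalf of an ECIES client), so this sketch intentionally covers only the simplest case.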
@@ -1,8 +1,21 @@
2020-10-21 zzz
* NetDB:
- ECIES router support for encrypted lookups and stores (proposal #156)
- Reseed after a long downtime
* SSU: Increase socket buffer size (ticket #2781)

2020-10-17 zzz
* i2psnark: Remove references to "maggot" links
* SSU: Fix calculation of nextSendDelay (ticket #2714)

2020-10-15 zzz
* Crypto: libjbigi for aarch64 (ticket #1840)
* i2psnark: Hide BEP 48 padding directory from UI
* Router:
- More efficient initialization of Noise state
- Destroy ratchet HandshakeState after NS failure
- Add support for ratchet zero key (proposals #144, #156)
* Util: Singleton OrderedProperties comparator

2020-10-12 zzz
* DTG: Enable by default for Linux KDE and LXDE;

@@ -642,13 +642,13 @@ public class RouterContext extends I2PAppContext {
protected void initializeSessionKeyManager() {
synchronized (_lock3) {
if (_sessionKeyManager == null) {
TransientSessionKeyManager tskm = new TransientSessionKeyManager(this);
PublicKey pk = keyManager().getPublicKey();
if (pk != null && pk.getType() == EncType.ECIES_X25519) {
// TODO RatchetSKM only after updating MessageWrapper
RatchetSKM rskm = new RatchetSKM(this);
_sessionKeyManager = new MuxedSKM(tskm, rskm);
//_sessionKeyManager = new MuxedSKM(tskm, rskm);
_sessionKeyManager = rskm;
} else {
TransientSessionKeyManager tskm = new TransientSessionKeyManager(this);
_sessionKeyManager = tskm;
}
}

@@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 7;
public final static long BUILD = 8;

/** for example "-test" */
public final static String EXTRA = "";

@@ -1278,9 +1278,14 @@ public final class ECIESAEADEngine {
* Set a timer for a ratchet-layer reply if the application does not respond.
* NS only. CloveSet must include a LS for validation.
*
* @param skm must have non-null destination
* @since 0.9.46
*/
private void setResponseTimerNS(PublicKey from, List<GarlicClove> cloveSet, RatchetSKM skm) {
Destination us = skm.getDestination();
// temp for router SKM
if (us == null)
return;
for (GarlicClove clove : cloveSet) {
I2NPMessage msg = clove.getData();
if (msg.getType() != DatabaseStoreMessage.MESSAGE_TYPE)
@@ -1301,7 +1306,6 @@ public final class ECIESAEADEngine {
Destination d = ls2.getDestination();
if (_log.shouldInfo())
_log.info("Validated NS sender: " + d.toBase32());
Destination us = skm.getDestination();
ACKTimer ack = new ACKTimer(_context, us, d);
if (skm.registerTimer(from, d, ack)) {
ack.schedule(1000);
@@ -1316,12 +1320,16 @@ public final class ECIESAEADEngine {
* Set a timer for a ratchet-layer reply if the application does not respond.
* NSR/ES only.
*
* @param skm must have non-null destination
* @since 0.9.47
*/
private void setResponseTimer(PublicKey from, List<GarlicClove> cloveSet, RatchetSKM skm) {
Destination d = skm.getDestination(from);
if (d != null) {
Destination us = skm.getDestination();
// temp for router SKM
if (us == null)
return;
ACKTimer ack = new ACKTimer(_context, us, d);
if (skm.registerTimer(from, null, ack)) {
ack.schedule(1000);

@@ -29,6 +29,8 @@ import net.i2p.data.i2np.DeliveryInstructions;
import net.i2p.data.i2np.GarlicClove;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.router.RouterIdentity;
import net.i2p.data.router.RouterInfo;
import net.i2p.router.LeaseSetKeys;
import net.i2p.router.RouterContext;
import net.i2p.router.crypto.ratchet.MuxedSKM;
@@ -160,14 +162,16 @@ public class GarlicMessageBuilder {
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
PublicKey key = config.getRecipientPublicKey();
if (key == null) {
if (config.getRecipient() == null) {
RouterInfo ri = config.getRecipient();
if (ri == null)
throw new IllegalArgumentException("Null recipient specified");
} else if (config.getRecipient().getIdentity() == null) {
RouterIdentity ident = ri.getIdentity();
if (ident == null)
throw new IllegalArgumentException("Null recipient.identity specified");
} else if (config.getRecipient().getIdentity().getPublicKey() == null) {
PublicKey pk = ident.getPublicKey();
if (pk == null)
throw new IllegalArgumentException("Null recipient.identity.publicKey specified");
} else
key = config.getRecipient().getIdentity().getPublicKey();
key = pk;
}
if (key.getType() != EncType.ELGAMAL_2048)
throw new IllegalArgumentException();

@@ -12,10 +12,12 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;

import net.i2p.crypto.EncType;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.router.RouterIdentity;
import net.i2p.data.router.RouterInfo;
import net.i2p.kademlia.KBucketSet;
import net.i2p.router.RouterContext;
@@ -86,7 +88,8 @@ class ExploreJob extends SearchJob {
*/
@Override
protected I2NPMessage buildMessage(TunnelId replyTunnelId, Hash replyGateway, long expiration, RouterInfo peer) {
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext(), true);
final RouterContext ctx = getContext();
DatabaseLookupMessage msg = new DatabaseLookupMessage(ctx, true);
msg.setSearchKey(getState().getTarget());
msg.setFrom(replyGateway);
// Moved below now that DLM makes a copy
@@ -105,7 +108,7 @@ class ExploreJob extends SearchJob {
}

KBucketSet<Hash> ks = _facade.getKBuckets();
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(getState().getTarget());
Hash rkey = ctx.routingKeyGenerator().getRoutingKey(getState().getTarget());
// in a few releases, we can (and should) remove this,
// as routers will honor the above flag, and we want the table to include
// only non-floodfills.
@@ -135,23 +138,42 @@ class ExploreJob extends SearchJob {
msg.setDontIncludePeers(dontIncludePeers);

// Now encrypt if we can
RouterIdentity ident = peer.getIdentity();
EncType type = ident.getPublicKey().getType();
boolean encryptElG = ctx.getProperty(IterativeSearchJob.PROP_ENCRYPT_RI, IterativeSearchJob.DEFAULT_ENCRYPT_RI);
I2NPMessage outMsg;
if (replyTunnelId != null &&
getContext().getProperty(IterativeSearchJob.PROP_ENCRYPT_RI, IterativeSearchJob.DEFAULT_ENCRYPT_RI)) {
((encryptElG && type == EncType.ELGAMAL_2048) || type == EncType.ECIES_X25519)) {
EncType ourType = ctx.keyManager().getPublicKey().getType();
boolean ratchet1 = ourType.equals(EncType.ECIES_X25519);
boolean ratchet2 = DatabaseLookupMessage.supportsRatchetReplies(peer);
// request encrypted reply?
if (DatabaseLookupMessage.supportsEncryptedReplies(peer)) {
if (DatabaseLookupMessage.supportsEncryptedReplies(peer) &&
(ratchet2 || !ratchet1)) {
boolean supportsRatchet = ratchet1 && ratchet2;
MessageWrapper.OneTimeSession sess;
sess = MessageWrapper.generateSession(getContext(), MAX_EXPLORE_TIME);
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Requesting encrypted reply from " + peer.getIdentity().calculateHash() +
' ' + sess.key + ' ' + sess.tag);
sess = MessageWrapper.generateSession(ctx, ctx.sessionKeyManager(), MAX_EXPLORE_TIME, !supportsRatchet);
if (sess != null) {
if (sess.tag != null) {
if (_log.shouldInfo())
_log.info(getJobId() + ": Requesting AES reply from " + ident.calculateHash() + " with: " + sess.key + ' ' + sess.tag);
msg.setReplySession(sess.key, sess.tag);
} else {
if (_log.shouldInfo())
_log.info(getJobId() + ": Requesting AEAD reply from " + ident.calculateHash() + " with: " + sess.key + ' ' + sess.rtag);
msg.setReplySession(sess.key, sess.rtag);
}
} else {
if (_log.shouldWarn())
_log.warn(getJobId() + ": Failed encrypt to " + peer);
// client went away, but send it anyway
}
}
// may be null
outMsg = MessageWrapper.wrap(getContext(), msg, peer);
outMsg = MessageWrapper.wrap(ctx, msg, peer);
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Encrypted exploratory DLM for " + getState().getTarget() + " to " +
peer.getIdentity().calculateHash());
ident.calculateHash());
} else {
outMsg = msg;
}

@@ -103,14 +103,15 @@ class FloodfillVerifyStoreJob extends JobImpl {
return;
}

final RouterContext ctx = getContext();
boolean isInboundExploratory;
TunnelInfo replyTunnelInfo;
if (_isRouterInfo || getContext().keyRing().get(_key) != null ||
if (_isRouterInfo || ctx.keyRing().get(_key) != null ||
_type == DatabaseEntry.KEY_TYPE_META_LS2) {
replyTunnelInfo = getContext().tunnelManager().selectInboundExploratoryTunnel(_target);
replyTunnelInfo = ctx.tunnelManager().selectInboundExploratoryTunnel(_target);
isInboundExploratory = true;
} else {
replyTunnelInfo = getContext().tunnelManager().selectInboundTunnel(_client, _target);
replyTunnelInfo = ctx.tunnelManager().selectInboundTunnel(_client, _target);
isInboundExploratory = false;
}
if (replyTunnelInfo == null) {
@@ -125,11 +126,11 @@ class FloodfillVerifyStoreJob extends JobImpl {
// to avoid association by the exploratory tunnel OBEP.
// Unless it is an encrypted leaseset.
TunnelInfo outTunnel;
if (_isRouterInfo || getContext().keyRing().get(_key) != null ||
if (_isRouterInfo || ctx.keyRing().get(_key) != null ||
_type == DatabaseEntry.KEY_TYPE_META_LS2) {
outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(_target);
outTunnel = ctx.tunnelManager().selectOutboundExploratoryTunnel(_target);
} else {
outTunnel = getContext().tunnelManager().selectOutboundTunnel(_client, _target);
outTunnel = ctx.tunnelManager().selectOutboundTunnel(_client, _target);
}
if (outTunnel == null) {
if (_log.shouldLog(Log.WARN))
@@ -146,23 +147,41 @@ class FloodfillVerifyStoreJob extends JobImpl {
_facade.verifyFinished(_key);
return;
}
EncType type = peer.getIdentity().getPublicKey().getType();
boolean supportsElGamal = true;
boolean supportsRatchet = false;
if (DatabaseLookupMessage.supportsEncryptedReplies(peer)) {
// register the session with the right SKM
MessageWrapper.OneTimeSession sess;
if (isInboundExploratory) {
sess = MessageWrapper.generateSession(getContext(), VERIFY_TIMEOUT);
EncType ourType = ctx.keyManager().getPublicKey().getType();
supportsRatchet = ourType == EncType.ECIES_X25519 &&
type == EncType.ECIES_X25519;
supportsElGamal = ourType == EncType.ELGAMAL_2048 &&
type == EncType.ELGAMAL_2048;
if (supportsElGamal || supportsRatchet) {
sess = MessageWrapper.generateSession(ctx, ctx.sessionKeyManager(), VERIFY_TIMEOUT, !supportsRatchet);
} else {
LeaseSetKeys lsk = getContext().keyManager().getKeys(_client);
// We don't have a compatible way to get a reply,
// skip it for now.
if (_log.shouldWarn())
_log.warn("Skipping store verify for incompatible router " + peer);
_facade.verifyFinished(_key);
return;
}
} else {
LeaseSetKeys lsk = ctx.keyManager().getKeys(_client);
// an ElG router supports ratchet replies
supportsRatchet = lsk != null &&
lsk.isSupported(EncType.ECIES_X25519) &&
DatabaseLookupMessage.supportsRatchetReplies(peer);
// but an ECIES router does not supports ElGamal requests
supportsElGamal = lsk != null &&
lsk.isSupported(EncType.ELGAMAL_2048);
lsk.isSupported(EncType.ELGAMAL_2048) &&
type == EncType.ELGAMAL_2048;
if (supportsElGamal || supportsRatchet) {
// garlic encrypt
sess = MessageWrapper.generateSession(getContext(), _client, VERIFY_TIMEOUT, !supportsRatchet);
sess = MessageWrapper.generateSession(ctx, _client, VERIFY_TIMEOUT, !supportsRatchet);
if (sess == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No SKM to reply to");
@@ -180,11 +199,11 @@ class FloodfillVerifyStoreJob extends JobImpl {
}
if (sess.tag != null) {
if (_log.shouldInfo())
_log.info(getJobId() + ": Requesting AES reply from " + peer + ' ' + sess.key + ' ' + sess.tag);
_log.info(getJobId() + ": Requesting AES reply from " + _target + " with: " + sess.key + ' ' + sess.tag);
lookup.setReplySession(sess.key, sess.tag);
} else {
if (_log.shouldInfo())
_log.info(getJobId() + ": Requesting AEAD reply from " + peer + ' ' + sess.key + ' ' + sess.rtag);
_log.info(getJobId() + ": Requesting AEAD reply from " + _target + " with: " + sess.key + ' ' + sess.rtag);
lookup.setReplySession(sess.key, sess.rtag);
}
}
@@ -195,7 +214,7 @@ class FloodfillVerifyStoreJob extends JobImpl {
fromKey = null;
else
fromKey = _client;
_wrappedMessage = MessageWrapper.wrap(getContext(), lookup, fromKey, peer);
_wrappedMessage = MessageWrapper.wrap(ctx, lookup, fromKey, peer);
if (_wrappedMessage == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Fail Garlic encrypting");
@@ -205,7 +224,8 @@ class FloodfillVerifyStoreJob extends JobImpl {
sent = _wrappedMessage.getMessage();
} else {
// force full ElG for ECIES fromkey
sent = MessageWrapper.wrap(getContext(), lookup, peer);
// or forces ECIES for ECIES peer
sent = MessageWrapper.wrap(ctx, lookup, peer);
if (sent == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Fail Garlic encrypting");
@@ -216,12 +236,12 @@ class FloodfillVerifyStoreJob extends JobImpl {

if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Starting verify (stored " + _key + " to " + _sentTo + "), asking " + _target);
_sendTime = getContext().clock().now();
_sendTime = ctx.clock().now();
_expiration = _sendTime + VERIFY_TIMEOUT;
getContext().messageRegistry().registerPending(new VerifyReplySelector(),
ctx.messageRegistry().registerPending(new VerifyReplySelector(),
new VerifyReplyJob(getContext()),
new VerifyTimeoutJob(getContext()));
getContext().tunnelDispatcher().dispatchOutbound(sent, outTunnel.getSendTunnelId(0), _target);
ctx.tunnelDispatcher().dispatchOutbound(sent, outTunnel.getSendTunnelId(0), _target);
}

/**

@@ -313,8 +313,9 @@ public class IterativeSearchJob extends FloodSearchJob {
* Send a DLM to the peer
*/
private void sendQuery(Hash peer) {
TunnelManagerFacade tm = getContext().tunnelManager();
RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
final RouterContext ctx = getContext();
TunnelManagerFacade tm = ctx.tunnelManager();
RouterInfo ri = ctx.netDb().lookupRouterInfoLocally(peer);
if (ri != null) {
// Now that most of the netdb is Ed RIs and EC LSs, don't even bother
// querying old floodfills that don't know about those sig types.
@@ -337,7 +338,7 @@ public class IterativeSearchJob extends FloodSearchJob {
outTunnel = tm.selectOutboundTunnel(_fromLocalDest, peer);
if (outTunnel == null)
outTunnel = tm.selectOutboundExploratoryTunnel(peer);
LeaseSetKeys lsk = getContext().keyManager().getKeys(_fromLocalDest);
LeaseSetKeys lsk = ctx.keyManager().getKeys(_fromLocalDest);
supportsRatchet = lsk != null &&
lsk.isSupported(EncType.ECIES_X25519) &&
DatabaseLookupMessage.supportsRatchetReplies(ri);
@@ -357,7 +358,7 @@ public class IterativeSearchJob extends FloodSearchJob {
replyTunnel = tm.selectInboundExploratoryTunnel(peer);
}
isDirect = false;
} else if ((!_isLease) && ri != null && getContext().commSystem().isEstablished(peer)) {
} else if ((!_isLease) && ri != null && ctx.commSystem().isEstablished(peer)) {
// If it's a RI lookup, not from a client, and we're already connected, just ask directly
// This also saves the ElG encryption for us and the decryption for the ff
// There's no anonymity reason to use an expl. tunnel... the main reason
@@ -367,13 +368,13 @@ public class IterativeSearchJob extends FloodSearchJob {
replyTunnel = null;
isClientReplyTunnel = false;
isDirect = true;
getContext().statManager().addRateData("netDb.RILookupDirect", 1);
ctx.statManager().addRateData("netDb.RILookupDirect", 1);
} else {
outTunnel = tm.selectOutboundExploratoryTunnel(peer);
replyTunnel = tm.selectInboundExploratoryTunnel(peer);
isClientReplyTunnel = false;
isDirect = false;
getContext().statManager().addRateData("netDb.RILookupDirect", 0);
ctx.statManager().addRateData("netDb.RILookupDirect", 0);
}
if ((!isDirect) && (replyTunnel == null || outTunnel == null)) {
failed();
@@ -401,14 +402,15 @@ public class IterativeSearchJob extends FloodSearchJob {
}
}

DatabaseLookupMessage dlm = new DatabaseLookupMessage(getContext(), true);
DatabaseLookupMessage dlm = new DatabaseLookupMessage(ctx, true);
if (isDirect) {
dlm.setFrom(getContext().routerHash());
dlm.setFrom(ctx.routerHash());
} else {
dlm.setFrom(replyTunnel.getPeer(0));
dlm.setReplyTunnel(replyTunnel.getReceiveTunnelId(0));
}
dlm.setMessageExpiration(getContext().clock().now() + SINGLE_SEARCH_MSG_TIME);
long now = ctx.clock().now();
dlm.setMessageExpiration(now + SINGLE_SEARCH_MSG_TIME);
dlm.setSearchKey(_key);
dlm.setSearchType(_isLease ? DatabaseLookupMessage.Type.LS : DatabaseLookupMessage.Type.RI);

@@ -423,15 +425,16 @@ public class IterativeSearchJob extends FloodSearchJob {
" direct? " + isDirect +
" reply via client tunnel? " + isClientReplyTunnel);
}
long now = getContext().clock().now();
_sentTime.put(peer, Long.valueOf(now));

EncType type = ri != null ? ri.getIdentity().getPublicKey().getType() : null;
boolean encryptElG = ctx.getProperty(PROP_ENCRYPT_RI, DEFAULT_ENCRYPT_RI);
I2NPMessage outMsg = null;
if (isDirect) {
// never wrap
} else if (_isLease ||
(getContext().getProperty(PROP_ENCRYPT_RI, DEFAULT_ENCRYPT_RI) &&
getContext().jobQueue().getMaxLag() < 300)) {
(encryptElG && type == EncType.ELGAMAL_2048 && ctx.jobQueue().getMaxLag() < 300) ||
type == EncType.ECIES_X25519) {
// Full ElG is fairly expensive so only do it for LS lookups
// and for RI lookups on fast boxes.
// if we have the ff RI, garlic encrypt it
@@ -439,8 +442,7 @@ public class IterativeSearchJob extends FloodSearchJob {
// request encrypted reply
// now covered by version check above, which is more recent
//if (DatabaseLookupMessage.supportsEncryptedReplies(ri)) {
EncType type = ri.getIdentity().getPublicKey().getType();
if (type != EncType.ELGAMAL_2048) {
if (!LeaseSetKeys.SET_BOTH.contains(type)) {
failed(peer, false);
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": Can't do encrypted lookup to " + peer + " with EncType " + type);
@@ -448,23 +450,38 @@ public class IterativeSearchJob extends FloodSearchJob {
}

MessageWrapper.OneTimeSession sess;
if (isClientReplyTunnel)
sess = MessageWrapper.generateSession(getContext(), _fromLocalDest, SINGLE_SEARCH_MSG_TIME, !supportsRatchet);
else
sess = MessageWrapper.generateSession(getContext(), SINGLE_SEARCH_MSG_TIME);
if (isClientReplyTunnel) {
sess = MessageWrapper.generateSession(ctx, _fromLocalDest, SINGLE_SEARCH_MSG_TIME, !supportsRatchet);
} else {
EncType ourType = ctx.keyManager().getPublicKey().getType();
boolean ratchet1 = ourType.equals(EncType.ECIES_X25519);
boolean ratchet2 = DatabaseLookupMessage.supportsRatchetReplies(ri);
if (ratchet1 && !ratchet2) {
failed(peer, false);
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": Can't do encrypted lookup to " + peer + ", does not support AEAD replies");
return;
}
supportsRatchet = ratchet1 && ratchet2;
sess = MessageWrapper.generateSession(ctx, ctx.sessionKeyManager(), SINGLE_SEARCH_MSG_TIME, !supportsRatchet);
}
if (sess != null) {
if (sess.tag != null) {
if (_log.shouldInfo())
_log.info(getJobId() + ": Requesting AES reply from " + peer + ' ' + sess.key + ' ' + sess.tag);
_log.info(getJobId() + ": Requesting AES reply from " + peer + " with: " + sess.key + ' ' + sess.tag);
dlm.setReplySession(sess.key, sess.tag);
} else {
if (_log.shouldInfo())
_log.info(getJobId() + ": Requesting AEAD reply from " + peer + ' ' + sess.key + ' ' + sess.rtag);
_log.info(getJobId() + ": Requesting AEAD reply from " + peer + " with: " + sess.key + ' ' + sess.rtag);
dlm.setReplySession(sess.key, sess.rtag);
}
} // else client went away, but send it anyway
} else {
if (_log.shouldWarn())
_log.warn(getJobId() + ": Failed encrypt to " + ri);
// client went away, but send it anyway
}

outMsg = MessageWrapper.wrap(getContext(), dlm, ri);
outMsg = MessageWrapper.wrap(ctx, dlm, ri);
// ElG can take a while so do a final check before we send it,
// a response may have come in.
if (_dead) {
@@ -479,7 +496,7 @@ public class IterativeSearchJob extends FloodSearchJob {
if (outMsg == null)
outMsg = dlm;
if (isDirect) {
OutNetMessage m = new OutNetMessage(getContext(), outMsg, outMsg.getMessageExpiration(),
OutNetMessage m = new OutNetMessage(ctx, outMsg, outMsg.getMessageExpiration(),
OutNetMessage.PRIORITY_MY_NETDB_LOOKUP, ri);
// Should always succeed, we are connected already
//m.setOnFailedReplyJob(onFail);
@@ -487,14 +504,14 @@ public class IterativeSearchJob extends FloodSearchJob {
//m.setOnReplyJob(onReply);
//m.setReplySelector(selector);
//getContext().messageRegistry().registerPending(m);
getContext().commSystem().processMessage(m);
ctx.commSystem().processMessage(m);
} else {
getContext().tunnelDispatcher().dispatchOutbound(outMsg, outTunnel.getSendTunnelId(0), peer);
ctx.tunnelDispatcher().dispatchOutbound(outMsg, outTunnel.getSendTunnelId(0), peer);
}

// The timeout job is always run (never cancelled)
// Note that the timeout is much shorter than the message expiration (see above)
Job j = new IterativeTimeoutJob(getContext(), peer, this);
Job j = new IterativeTimeoutJob(ctx, peer, this);
long expire = Math.min(_expiration, now + _singleSearchTime);
j.getTiming().setStartAfter(expire);
getContext().jobQueue().addJob(j);

@@ -43,8 +43,11 @@ public class MessageWrapper {
* to hide the contents from the OBEP.
* Caller must call acked() or fail() on the returned object.
*
* ELGAMAL ONLY. Both from and to must support ElGamal.
*
* @param from must be a local client with a session key manager,
* or null to use the router's session key manager
* or null to use the router's session key manager.
* SessionKeyManager MUST support ElGamal.
* @param to must be ELGAMAL_2048 EncType
* @return null on encrypt failure
*/
@@ -185,9 +188,9 @@ public class MessageWrapper {

/**
* Create a single key and tag, for receiving a single encrypted message,
* and register it with the router's session key manager, to expire in two minutes.
* The recipient can then send us an AES-encrypted message,
* avoiding ElGamal.
* and register it with the client's session key manager, to expire in the time specified.
* The recipient can then send us an AES- or ChaCha- encrypted message,
* avoiding full ElGamal or ECIES.
*
* @param expiration time from now
* @since 0.9.7
@@ -198,9 +201,9 @@ public class MessageWrapper {

/**
* Create a single key and tag, for receiving a single encrypted message,
* and register it with the client's session key manager, to expire in two minutes.
* The recipient can then send us an AES-encrypted message,
* avoiding ElGamal.
* and register it with the client's session key manager, to expire in the time specified.
* The recipient can then send us an AES- or ChaCha- encrypted message,
* avoiding full ElGamal or ECIES.
*
* @param expiration time from now
* @return null if we can't find the SKM for the localDest
@@ -216,9 +219,9 @@ public class MessageWrapper {

/**
* Create a single key and tag, for receiving a single encrypted message,
* and register it with the given session key manager, to expire in two minutes.
* The recipient can then send us an AES-encrypted message,
* avoiding ElGamal.
* and register it with the client's session key manager, to expire in the time specified.
* The recipient can then send us an AES- or ChaCha- encrypted message,
* avoiding full ElGamal or ECIES.
*
* @param expiration time from now
* @return non-null

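The javadoc changes above describe the pattern the lookup jobs rely on: generate a one-time reply session, then attach either the AES session tag (sess.tag) or the ratchet tag (sess.rtag) to the lookup message. A small self-contained sketch of that tag selection follows; OneTimeSession, Lookup, generateSession and the field names here are simplified stand-ins for illustration, not the actual MessageWrapper or DatabaseLookupMessage API.

// Illustration only: simplified stand-ins for MessageWrapper.OneTimeSession and
// DatabaseLookupMessage, showing how the jobs pick sess.tag vs. sess.rtag.
public class ReplySessionSketch {

    static class OneTimeSession {
        String key;   // session key (stand-in for SessionKey)
        String tag;   // AES session tag, set when ElGamal is forced
        String rtag;  // ratchet tag, set when an AEAD reply is requested
    }

    static class Lookup {
        String replyKey, replyTag;
        void setReplySession(String key, String tag) { replyKey = key; replyTag = tag; }
    }

    // Mirrors the generateSession(ctx, skm, expiration, forceElG) call shape used
    // in the diff: forceElG selects the AES tag path, otherwise a ratchet tag.
    static OneTimeSession generateSession(boolean forceElG) {
        OneTimeSession sess = new OneTimeSession();
        sess.key = "one-time-key";
        if (forceElG)
            sess.tag = "aes-tag";
        else
            sess.rtag = "ratchet-tag";
        return sess;
    }

    public static void main(String[] args) {
        boolean supportsRatchet = true;
        OneTimeSession sess = generateSession(!supportsRatchet);
        Lookup lookup = new Lookup();
        // Same branch as in ExploreJob / IterativeSearchJob / FloodfillVerifyStoreJob:
        if (sess.tag != null)
            lookup.setReplySession(sess.key, sess.tag);
        else
            lookup.setReplySession(sess.key, sess.rtag);
        System.out.println("reply tag attached: " + lookup.replyTag);
    }
}
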
@@ -486,6 +486,8 @@ class SearchJob extends JobImpl {
//I2NPMessage msg = buildMessage(expiration);
I2NPMessage msg = buildMessage(null, router.getIdentity().getHash(), expiration, router);
if (msg == null) {
if (_log.shouldWarn())
_log.warn("Failed to create DLM to : " + router);
getContext().jobQueue().addJob(new FailedJob(getContext(), router));
return;
}

@@ -20,6 +20,7 @@ import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataFormatException;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.router.RouterIdentity;
import net.i2p.data.router.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseStoreMessage;
@@ -459,7 +460,8 @@ abstract class StoreJob extends JobImpl {
* @since 0.7.10
*/
private void sendStoreThroughClient(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
long token = 1 + getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
final RouterContext ctx = getContext();
long token = 1 + ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE);
Hash client;
if (msg.getEntry().getType() == DatabaseEntry.KEY_TYPE_ENCRYPTED_LS2) {
// get the real client hash
@@ -468,8 +470,9 @@ abstract class StoreJob extends JobImpl {
client = msg.getKey();
}

Hash to = peer.getIdentity().getHash();
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundTunnel(client, to);
RouterIdentity ident = peer.getIdentity();
Hash to = ident.getHash();
TunnelInfo replyTunnel = ctx.tunnelManager().selectInboundTunnel(client, to);
if (replyTunnel == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No reply inbound tunnels available!");
@@ -484,13 +487,15 @@ abstract class StoreJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": send(dbStore) w/ token expected " + token);

TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel(client, to);
TunnelInfo outTunnel = ctx.tunnelManager().selectOutboundTunnel(client, to);
if (outTunnel != null) {
I2NPMessage sent;
LeaseSetKeys lsk = getContext().keyManager().getKeys(client);
if (lsk == null || lsk.isSupported(EncType.ELGAMAL_2048)) {
LeaseSetKeys lsk = ctx.keyManager().getKeys(client);
EncType type = ident.getPublicKey().getType();
if (type == EncType.ELGAMAL_2048 &&
(lsk == null || lsk.isSupported(EncType.ELGAMAL_2048))) {
// garlic encrypt
MessageWrapper.WrappedMessage wm = MessageWrapper.wrap(getContext(), msg, client, peer);
MessageWrapper.WrappedMessage wm = MessageWrapper.wrap(ctx, msg, client, peer);
if (wm == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Fail garlic encrypting from: " + client);
@@ -499,9 +504,10 @@ abstract class StoreJob extends JobImpl {
}
sent = wm.getMessage();
_state.addPending(to, wm);
} else if (lsk.isSupported(EncType.ECIES_X25519)) {
} else if (type == EncType.ECIES_X25519 ||
lsk.isSupported(EncType.ECIES_X25519)) {
// force full ElG for ECIES-only
sent = MessageWrapper.wrap(getContext(), msg, peer);
sent = MessageWrapper.wrap(ctx, msg, peer);
if (sent == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Fail garlic encrypting from: " + client);
@@ -515,15 +521,15 @@ abstract class StoreJob extends JobImpl {
sent = msg;
_state.addPending(to);
}
SendSuccessJob onReply = new SendSuccessJob(getContext(), peer, outTunnel, sent.getMessageSize());
FailedJob onFail = new FailedJob(getContext(), peer, getContext().clock().now());
StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration);
SendSuccessJob onReply = new SendSuccessJob(ctx, peer, outTunnel, sent.getMessageSize());
FailedJob onFail = new FailedJob(ctx, peer, ctx.clock().now());
StoreMessageSelector selector = new StoreMessageSelector(ctx, getJobId(), peer, token, expiration);

if (_log.shouldLog(Log.DEBUG)) {
_log.debug(getJobId() + ": sending encrypted store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + sent);
}
getContext().messageRegistry().registerPending(selector, onReply, onFail);
getContext().tunnelDispatcher().dispatchOutbound(sent, outTunnel.getSendTunnelId(0), null, to);
ctx.messageRegistry().registerPending(selector, onReply, onFail);
ctx.tunnelDispatcher().dispatchOutbound(sent, outTunnel.getSendTunnelId(0), null, to);
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("No outbound tunnels to send a dbStore out - delaying...");
@@ -531,9 +537,9 @@ abstract class StoreJob extends JobImpl {
// This means we will skip the peer next time, can't be helped for now
// without modding StoreState
_state.replyTimeout(to);
Job waiter = new WaitJob(getContext());
waiter.getTiming().setStartAfter(getContext().clock().now() + 3*1000);
getContext().jobQueue().addJob(waiter);
Job waiter = new WaitJob(ctx);
waiter.getTiming().setStartAfter(ctx.clock().now() + 3*1000);
ctx.jobQueue().addJob(waiter);
//fail();
}
}
@@ -549,9 +555,10 @@ abstract class StoreJob extends JobImpl {
* @since 0.9.41
*/
private void sendWrappedStoreThroughExploratory(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
long token = 1 + getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
final RouterContext ctx = getContext();
long token = 1 + ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE);
Hash to = peer.getIdentity().getHash();
TunnelInfo replyTunnel = getContext().tunnelManager().selectInboundExploratoryTunnel(to);
TunnelInfo replyTunnel = ctx.tunnelManager().selectInboundExploratoryTunnel(to);
if (replyTunnel == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No inbound expl. tunnels for reply - delaying...");
@@ -559,9 +566,9 @@ abstract class StoreJob extends JobImpl {
// This means we will skip the peer next time, can't be helped for now
// without modding StoreState
_state.replyTimeout(to);
Job waiter = new WaitJob(getContext());
waiter.getTiming().setStartAfter(getContext().clock().now() + 3*1000);
getContext().jobQueue().addJob(waiter);
Job waiter = new WaitJob(ctx);
waiter.getTiming().setStartAfter(ctx.clock().now() + 3*1000);
ctx.jobQueue().addJob(waiter);
return;
}
TunnelId replyTunnelId = replyTunnel.getReceiveTunnelId(0);
@@ -572,11 +579,14 @@ abstract class StoreJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": send(dbStore) w/ token expected " + token);

TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(to);
TunnelInfo outTunnel = ctx.tunnelManager().selectOutboundExploratoryTunnel(to);
if (outTunnel != null) {
I2NPMessage sent;
// garlic encrypt using router SKM
MessageWrapper.WrappedMessage wm = MessageWrapper.wrap(getContext(), msg, null, peer);
EncType ptype = peer.getIdentity().getPublicKey().getType();
EncType mtype = ctx.keyManager().getPublicKey().getType();
if (ptype == EncType.ELGAMAL_2048 && mtype == EncType.ELGAMAL_2048) {
MessageWrapper.WrappedMessage wm = MessageWrapper.wrap(ctx, msg, null, peer);
if (wm == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Fail garlic encrypting");
@@ -585,16 +595,19 @@ abstract class StoreJob extends JobImpl {
}
sent = wm.getMessage();
_state.addPending(to, wm);

SendSuccessJob onReply = new SendSuccessJob(getContext(), peer, outTunnel, sent.getMessageSize());
FailedJob onFail = new FailedJob(getContext(), peer, getContext().clock().now());
StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration);
} else {
sent = MessageWrapper.wrap(ctx, msg, peer);
_state.addPending(to);
}
SendSuccessJob onReply = new SendSuccessJob(ctx, peer, outTunnel, sent.getMessageSize());
FailedJob onFail = new FailedJob(ctx, peer, ctx.clock().now());
StoreMessageSelector selector = new StoreMessageSelector(ctx, getJobId(), peer, token, expiration);

if (_log.shouldLog(Log.DEBUG)) {
_log.debug(getJobId() + ": sending encrypted store to " + peer.getIdentity().getHash() + " through " + outTunnel + ": " + sent);
}
getContext().messageRegistry().registerPending(selector, onReply, onFail);
getContext().tunnelDispatcher().dispatchOutbound(sent, outTunnel.getSendTunnelId(0), null, to);
ctx.messageRegistry().registerPending(selector, onReply, onFail);
ctx.tunnelDispatcher().dispatchOutbound(sent, outTunnel.getSendTunnelId(0), null, to);
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("No outbound expl. tunnels to send a dbStore out - delaying...");
@@ -602,9 +615,9 @@ abstract class StoreJob extends JobImpl {
// This means we will skip the peer next time, can't be helped for now
// without modding StoreState
_state.replyTimeout(to);
Job waiter = new WaitJob(getContext());
waiter.getTiming().setStartAfter(getContext().clock().now() + 3*1000);
getContext().jobQueue().addJob(waiter);
Job waiter = new WaitJob(ctx);
waiter.getTiming().setStartAfter(ctx.clock().now() + 3*1000);
ctx.jobQueue().addJob(waiter);
}
}

@@ -636,10 +649,7 @@ abstract class StoreJob extends JobImpl {
RouterIdentity ident = ri.getIdentity();
if (ident.getSigningPublicKey().getType() == SigType.DSA_SHA1)
return false;
// temp until router ratchet SKM implemented
if (ident.getPublicKey().getType() != EncType.ELGAMAL_2048)
return false;
return true;
return LeaseSetKeys.SET_BOTH.contains(ident.getPublicKey().getType());
}

/** @since 0.9.38 */
@@ -771,11 +781,12 @@ abstract class StoreJob extends JobImpl {
* Send was totally successful
*/
protected void succeed() {
if (_log.shouldInfo()) {
_log.info(getJobId() + ": Succeeded sending key " + _state.getTarget());
// logged in subclass
//if (_log.shouldInfo()) {
// _log.info(getJobId() + ": Succeeded sending key " + _state.getTarget());
if (_log.shouldDebug())
_log.debug(getJobId() + ": State of successful send: " + _state);
}
//}
if (_onSuccess != null)
getContext().jobQueue().addJob(_onSuccess);
_state.complete(true);