2005-04-05 jrandom

* After a successful netDb search for a leaseSet, republish it to all of
      the peers we have tried so far who did not give us the key (up to 10),
      rather than the old K closest (which may include peers who had given us
      the key)
    * Don't wait 5 minutes to publish a leaseSet (duh!), and rather than
      republish it every 5 minutes, republish it every 3.  In addition, always
      republish as soon as the leaseSet changes (duh^2).
    * Minor fix for oddball startup race (thanks travis_bickle!)
    * Minor AES update to allow in-place decryption.
This commit is contained in:
jrandom
2005-04-05 16:06:14 +00:00
committed by zzz
parent 400feb3ba7
commit bc626ece2d
13 changed files with 195 additions and 40 deletions

View File

@ -24,12 +24,18 @@ public class NewsFetcher implements Runnable, EepGet.StatusListener {
private boolean _updateAvailable;
private long _lastFetch;
private static NewsFetcher _instance;
public static final NewsFetcher getInstance() { return _instance; }
//public static final synchronized NewsFetcher getInstance() { return _instance; }
public static final synchronized NewsFetcher getInstance(I2PAppContext ctx) {
if (_instance != null)
return _instance;
_instance = new NewsFetcher(ctx);
return _instance;
}
private static final String NEWS_FILE = "docs/news.xml";
private static final String TEMP_NEWS_FILE = "docs/news.xml.temp";
public NewsFetcher(I2PAppContext ctx) {
private NewsFetcher(I2PAppContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(NewsFetcher.class);
_instance = this;

View File

@ -73,7 +73,8 @@ public class RouterConsoleRunner {
t.printStackTrace();
}
I2PThread t = new I2PThread(new NewsFetcher(I2PAppContext.getGlobalContext()), "NewsFetcher");
NewsFetcher fetcher = NewsFetcher.getInstance(I2PAppContext.getGlobalContext());
I2PThread t = new I2PThread(fetcher, "NewsFetcher");
t.setDaemon(true);
t.start();
}

View File

@ -469,6 +469,6 @@ public class SummaryHelper {
}
public boolean updateAvailable() {
return NewsFetcher.getInstance().updateAvailable();
return NewsFetcher.getInstance(_context).updateAvailable();
}
}

View File

@ -40,6 +40,19 @@ public class AESEngine {
* @param length how much data to encrypt
*/
public void encrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int length) {
encrypt(payload, payloadIndex, out, outIndex, sessionKey, iv, 0, length);
}
/** Encrypt the payload with the session key
* @param payload data to be encrypted
* @param payloadIndex index into the payload to start encrypting
* @param out where to store the result
* @param outIndex where in out to start writing
* @param sessionKey private session key to encrypt to
* @param iv IV for CBC
* @param length how much data to encrypt
*/
public void encrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int ivOffset, int length) {
System.arraycopy(payload, payloadIndex, out, outIndex, length);
_log.warn("Warning: AES is disabled");
}
@ -120,6 +133,19 @@ public class AESEngine {
* @param length how much data to decrypt
*/
public void decrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int length) {
decrypt(payload, payloadIndex, out, outIndex, sessionKey, iv, 0, length);
}
/** Decrypt the data with the session key
* @param payload data to be decrypted
* @param payloadIndex index into the payload to start decrypting
* @param out where to store the cleartext
* @param outIndex where in out to start writing
* @param sessionKey private session key to decrypt to
* @param iv IV for CBC
* @param length how much data to decrypt
*/
public void decrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int ivOffset, int length) {
System.arraycopy(payload, payloadIndex, out, outIndex, length);
_log.warn("Warning: AES is disabled");
}

View File

@ -12,8 +12,10 @@ package net.i2p.crypto;
import java.security.InvalidKeyException;
import net.i2p.I2PAppContext;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
import net.i2p.data.SessionKey;
import net.i2p.util.ByteCache;
import net.i2p.util.Log;
/**
@ -31,14 +33,20 @@ public class CryptixAESEngine extends AESEngine {
private final static byte FAKE_KEY = 0x2A;
private CryptixAESKeyCache _cache;
private static final ByteCache _prevCache = ByteCache.getInstance(16, 16);
public CryptixAESEngine(I2PAppContext context) {
super(context);
_log = context.logManager().getLog(CryptixAESEngine.class);
_cache = new CryptixAESKeyCache();
}
public void encrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int length) {
if ( (payload == null) || (out == null) || (sessionKey == null) || (iv == null) || (iv.length != 16) )
encrypt(payload, payloadIndex, out, outIndex, sessionKey, iv, 0, length);
}
public void encrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int ivOffset, int length) {
if ( (payload == null) || (out == null) || (sessionKey == null) || (iv == null) )
throw new NullPointerException("invalid args to aes");
if (payload.length < payloadIndex + length)
throw new IllegalArgumentException("Payload is too short");
@ -57,7 +65,7 @@ public class CryptixAESEngine extends AESEngine {
int numblock = length / 16;
DataHelper.xor(iv, 0, payload, payloadIndex, out, outIndex, 16);
DataHelper.xor(iv, ivOffset, payload, payloadIndex, out, outIndex, 16);
encryptBlock(out, outIndex, sessionKey, out, outIndex);
for (int x = 1; x < numblock; x++) {
DataHelper.xor(out, outIndex + (x-1) * 16, payload, payloadIndex + x * 16, out, outIndex + x * 16, 16);
@ -66,8 +74,10 @@ public class CryptixAESEngine extends AESEngine {
}
public void decrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int length) {
if ((iv== null) || (payload == null) || (payload.length <= 0) || (sessionKey == null)
|| (iv.length != 16) )
decrypt(payload, payloadIndex, out, outIndex, sessionKey, iv, 0, length);
}
public void decrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int ivOffset, int length) {
if ((iv== null) || (payload == null) || (payload.length <= 0) || (sessionKey == null) )
throw new IllegalArgumentException("bad setup");
else if (out == null)
throw new IllegalArgumentException("out is null");
@ -84,12 +94,32 @@ public class CryptixAESEngine extends AESEngine {
int numblock = length / 16;
if (length % 16 != 0) numblock++;
ByteArray prevA = _prevCache.acquire();
byte prev[] = prevA.getData();
ByteArray curA = _prevCache.acquire();
byte cur[] = curA.getData();
System.arraycopy(iv, ivOffset, prev, 0, 16);
for (int x = 0; x < numblock; x++) {
System.arraycopy(payload, payloadIndex + (x * 16), cur, 0, 16);
decryptBlock(payload, payloadIndex + (x * 16), sessionKey, out, outIndex + (x * 16));
DataHelper.xor(out, outIndex + x * 16, prev, 0, out, outIndex + x * 16, 16);
iv = prev; // just use IV to switch 'em around
prev = cur;
cur = iv;
}
/*
decryptBlock(payload, payloadIndex, sessionKey, out, outIndex);
DataHelper.xor(out, outIndex, iv, 0, out, outIndex, 16);
for (int x = 1; x < numblock; x++) {
decryptBlock(payload, payloadIndex + (x * 16), sessionKey, out, outIndex + (x * 16));
DataHelper.xor(out, outIndex + x * 16, payload, payloadIndex + (x - 1) * 16, out, outIndex + x * 16, 16);
}
*/
_prevCache.release(prevA);
_prevCache.release(curA);
}
public final void encryptBlock(byte payload[], int inIndex, SessionKey sessionKey, byte out[], int outIndex) {

View File

@ -63,11 +63,20 @@ public class HMACSHA256Generator {
* Calculate the HMAC of the data with the given key
*/
public Hash calculate(SessionKey key, byte data[]) {
if ((key == null) || (key.getData() == null) || (data == null))
throw new NullPointerException("Null arguments for HMAC");
return calculate(key, data, 0, data.length);
}
/**
* Calculate the HMAC of the data with the given key
*/
public Hash calculate(SessionKey key, byte data[], int offset, int length) {
if ((key == null) || (key.getData() == null) || (data == null))
throw new NullPointerException("Null arguments for HMAC");
Buffer buf = new Buffer(data.length);
calculate(key, data, buf);
Buffer buf = new Buffer(length);
calculate(key, data, offset, length, buf);
Hash rv = new Hash(buf.rv);
buf.releaseCached();
return rv;
@ -77,10 +86,17 @@ public class HMACSHA256Generator {
* Calculate the HMAC of the data with the given key
*/
public void calculate(SessionKey key, byte data[], Buffer buf) {
calculate(key, data, 0, data.length, buf);
}
/**
* Calculate the HMAC of the data with the given key
*/
public void calculate(SessionKey key, byte data[], int offset, int length, Buffer buf) {
// inner hash
padKey(key.getData(), _IPAD, buf.padded);
System.arraycopy(buf.padded, 0, buf.innerBuf, 0, PAD_LENGTH);
System.arraycopy(data, 0, buf.innerBuf, PAD_LENGTH, data.length);
System.arraycopy(data, offset, buf.innerBuf, PAD_LENGTH, length);
Hash h = _context.sha().calculateHash(buf.innerBuf, buf.innerEntry);

View File

@ -1,4 +1,15 @@
$Id: history.txt,v 1.185 2005/04/01 08:29:27 jrandom Exp $
$Id: history.txt,v 1.186 2005/04/03 07:50:12 jrandom Exp $
2005-04-05 jrandom
* After a successful netDb search for a leaseSet, republish it to all of
the peers we have tried so far who did not give us the key (up to 10),
rather than the old K closest (which may include peers who had given us
the key)
* Don't wait 5 minutes to publish a leaseSet (duh!), and rather than
republish it every 5 minutes, republish it every 3. In addition, always
republish as soon as the leaseSet changes (duh^2).
* Minor fix for oddball startup race (thanks travis_bickle!)
* Minor AES update to allow in-place decryption.
2005-04-03 jrandom
* EepGet fix for open-ended HTTP fetches (such as the news.xml

View File

@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.178 $ $Date: 2005/04/01 08:29:26 $";
public final static String ID = "$Revision: 1.179 $ $Date: 2005/04/03 07:50:12 $";
public final static String VERSION = "0.5.0.5";
public final static long BUILD = 3;
public final static long BUILD = 4;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION);
System.out.println("Router ID: " + RouterVersion.ID);

View File

@ -289,8 +289,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
}
if (!_finished) {
if (_log.shouldLog(Log.ERROR))
_log.error("Unable to send to " + _toString + " because we couldn't find their leaseSet");
if (_log.shouldLog(Log.WARN))
_log.warn("Unable to send to " + _toString + " because we couldn't find their leaseSet");
}
dieFatal();

View File

@ -65,12 +65,12 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
private PeerSelector _peerSelector;
private RouterContext _context;
/**
* set of Hash objects of leases we're already managing (via RepublishLeaseSetJob).
* Map of Hash to RepublishLeaseSetJob for leases we're already managing.
* This is added to when we create a new RepublishLeaseSetJob, and the values are
* removed when the job decides to stop running.
*
*/
private Set _publishingLeaseSets;
private Map _publishingLeaseSets;
/**
* Hash of the key currently being searched for, pointing the SearchJob that
@ -126,7 +126,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_log = _context.logManager().getLog(KademliaNetworkDatabaseFacade.class);
_initialized = false;
_peerSelector = new PeerSelector(_context);
_publishingLeaseSets = new HashSet(8);
_publishingLeaseSets = new HashMap(8);
_lastExploreNew = 0;
_knownRouters = 0;
_activeRequests = new HashMap(8);
@ -440,14 +440,16 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
synchronized (_explicitSendKeys) {
_explicitSendKeys.add(h);
}
Job j = null;
RepublishLeaseSetJob j = null;
synchronized (_publishingLeaseSets) {
boolean isNew = _publishingLeaseSets.add(h);
if (isNew)
j = (RepublishLeaseSetJob)_publishingLeaseSets.get(h);
if (j == null) {
j = new RepublishLeaseSetJob(_context, this, h);
_publishingLeaseSets.put(h, j);
}
}
if (j != null)
_context.jobQueue().addJob(j);
j.getTiming().setStartAfter(_context.clock().now());
_context.jobQueue().addJob(j);
}
void stopPublishing(Hash target) {

View File

@ -22,7 +22,7 @@ import net.i2p.util.Log;
*/
public class RepublishLeaseSetJob extends JobImpl {
private Log _log;
private final static long REPUBLISH_LEASESET_DELAY = 5*60*1000; // 5 mins
private final static long REPUBLISH_LEASESET_DELAY = 3*60*1000; // 3 mins
private final static long REPUBLISH_LEASESET_TIMEOUT = 60*1000;
private Hash _dest;
private KademliaNetworkDatabaseFacade _facade;
@ -32,7 +32,7 @@ public class RepublishLeaseSetJob extends JobImpl {
_log = ctx.logManager().getLog(RepublishLeaseSetJob.class);
_facade = facade;
_dest = destHash;
getTiming().setStartAfter(ctx.clock().now()+REPUBLISH_LEASESET_DELAY);
//getTiming().setStartAfter(ctx.clock().now()+REPUBLISH_LEASESET_DELAY);
}
public String getName() { return "Republish a local leaseSet"; }
public void runJob() {
@ -40,23 +40,29 @@ public class RepublishLeaseSetJob extends JobImpl {
if (getContext().clientManager().isLocal(_dest)) {
LeaseSet ls = _facade.lookupLeaseSetLocally(_dest);
if (ls != null) {
_log.warn("Client " + _dest + " is local, so we're republishing it");
if (_log.shouldLog(Log.INFO))
_log.info("Client " + _dest + " is local, so we're republishing it");
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
_log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
if (_log.shouldLog(Log.WARN))
_log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
} else {
getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT));
}
} else {
_log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");
if (_log.shouldLog(Log.WARN))
_log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");
}
requeue(REPUBLISH_LEASESET_DELAY);
long republishDelay = getContext().random().nextLong(2*REPUBLISH_LEASESET_DELAY);
requeue(republishDelay);
return;
} else {
_log.info("Client " + _dest + " is no longer local, so no more republishing their leaseSet");
if (_log.shouldLog(Log.INFO))
_log.info("Client " + _dest + " is no longer local, so no more republishing their leaseSet");
}
_facade.stopPublishing(_dest);
} catch (RuntimeException re) {
_log.error("Uncaught error republishing the leaseSet", re);
if (_log.shouldLog(Log.ERROR))
_log.error("Uncaught error republishing the leaseSet", re);
_facade.stopPublishing(_dest);
throw re;
}

View File

@ -16,10 +16,12 @@ import java.util.Set;
import net.i2p.data.DataHelper;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
@ -97,6 +99,7 @@ class SearchJob extends JobImpl {
getContext().statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
if (_log.shouldLog(Log.DEBUG))
_log.debug("Search (" + getClass().getName() + " for " + key.toBase64(), new Exception("Search enqueued by"));
}
@ -586,6 +589,15 @@ class SearchJob extends JobImpl {
resend();
}
/**
* After a successful search for a leaseSet, we resend that leaseSet to all
* of the peers we tried and failed to query. This var bounds how many of
* those peers will get the data, in case a search had to crawl about
* substantially.
*
*/
private static final int MAX_LEASE_RESEND = 10;
/**
* After we get the data we were searching for, rebroadcast it to the peers
* we would query first if we were to search for it again (healing the network).
@ -593,12 +605,54 @@ class SearchJob extends JobImpl {
*/
private void resend() {
DataStructure ds = _facade.lookupLeaseSetLocally(_state.getTarget());
if (ds == null)
if (ds == null) {
ds = _facade.lookupRouterInfoLocally(_state.getTarget());
if (ds != null)
getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _state.getTarget(),
ds, null, null, RESEND_TIMEOUT,
_state.getSuccessful()));
if (ds != null)
getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _state.getTarget(),
ds, null, null, RESEND_TIMEOUT,
_state.getSuccessful()));
} else {
Set sendTo = _state.getFailed();
sendTo.addAll(_state.getPending());
int numSent = 0;
for (Iterator iter = sendTo.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
RouterInfo peerInfo = _facade.lookupRouterInfoLocally(peer);
if (peerInfo == null) continue;
if (resend(peerInfo, (LeaseSet)ds))
numSent++;
if (numSent >= MAX_LEASE_RESEND)
break;
}
getContext().statManager().addRateData("netDb.republishQuantity", numSent, numSent);
}
}
/**
* Resend the leaseSet to the peer who had previously failed to
* provide us with the data when we asked them.
*/
private boolean resend(RouterInfo toPeer, LeaseSet ls) {
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(ls.getDestination().calculateHash());
msg.setLeaseSet(ls);
msg.setMessageExpiration(getContext().clock().now() + RESEND_TIMEOUT);
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
if (outTunnel != null) {
TunnelId targetTunnelId = null; // not needed
Job onSend = null; // not wanted
if (_log.shouldLog(Log.DEBUG))
_log.debug("resending leaseSet out to " + toPeer.getIdentity().getHash() + " through " + outTunnel + ": " + msg);
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0), null, toPeer.getIdentity().getHash());
return true;
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("unable to resend a leaseSet - no outbound exploratory tunnels!");
return false;
}
}
/**

View File

@ -267,8 +267,11 @@ public class TunnelPoolManager implements TunnelManagerFacade {
*/
int allocateBuilds(int wanted) {
synchronized (this) {
if (_outstandingBuilds >= _maxOutstandingBuilds)
return 0;
if (_outstandingBuilds >= _maxOutstandingBuilds) {
// ok, as a failsafe, always let one through
_outstandingBuilds++;
return 1;
}
if (_outstandingBuilds + wanted < _maxOutstandingBuilds) {
_outstandingBuilds += wanted;
return wanted;