2005-04-05 jrandom

* After a successful netDb search for a leaseSet, republish it to all of the
  peers we have tried so far who did not give us the key (up to 10), rather
  than the old K closest (which may include peers who had given us the key)
* Don't wait 5 minutes to publish a leaseSet (duh!), and rather than
  republish it every 5 minutes, republish it every 3. In addition, always
  republish as soon as the leaseSet changes (duh^2).
* Minor fix for oddball startup race (thanks travis_bickle!)
* Minor AES update to allow in-place decryption.
@@ -24,12 +24,18 @@ public class NewsFetcher implements Runnable, EepGet.StatusListener {
     private boolean _updateAvailable;
     private long _lastFetch;
     private static NewsFetcher _instance;
-    public static final NewsFetcher getInstance() { return _instance; }
+    //public static final synchronized NewsFetcher getInstance() { return _instance; }
+    public static final synchronized NewsFetcher getInstance(I2PAppContext ctx) {
+        if (_instance != null)
+            return _instance;
+        _instance = new NewsFetcher(ctx);
+        return _instance;
+    }
 
     private static final String NEWS_FILE = "docs/news.xml";
     private static final String TEMP_NEWS_FILE = "docs/news.xml.temp";
 
-    public NewsFetcher(I2PAppContext ctx) {
+    private NewsFetcher(I2PAppContext ctx) {
         _context = ctx;
         _log = ctx.logManager().getLog(NewsFetcher.class);
         _instance = this;
@@ -73,7 +73,8 @@ public class RouterConsoleRunner {
             t.printStackTrace();
         }
 
-        I2PThread t = new I2PThread(new NewsFetcher(I2PAppContext.getGlobalContext()), "NewsFetcher");
+        NewsFetcher fetcher = NewsFetcher.getInstance(I2PAppContext.getGlobalContext());
+        I2PThread t = new I2PThread(fetcher, "NewsFetcher");
         t.setDaemon(true);
         t.start();
     }
@@ -469,6 +469,6 @@ public class SummaryHelper {
     }
 
     public boolean updateAvailable() {
-        return NewsFetcher.getInstance().updateAvailable();
+        return NewsFetcher.getInstance(_context).updateAvailable();
     }
 }
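The "oddball startup race" fix is visible across the three hunks above: RouterConsoleRunner used to construct a NewsFetcher directly while SummaryHelper read the static _instance through getInstance(), so the console could observe a null or not-yet-assigned instance. Funneling every caller through one synchronized accessor closes that window. A minimal sketch of the idiom, with hypothetical names rather than the real I2P classes:

    // Sketch only: the lazy-singleton idiom the diff above adopts.
    public class Fetcher implements Runnable {
        private static Fetcher _instance;

        // synchronized so two startup threads cannot both construct an
        // instance and race on the static field
        public static synchronized Fetcher getInstance() {
            if (_instance == null)
                _instance = new Fetcher();
            return _instance;
        }

        private Fetcher() {}   // private: callers must go through getInstance()

        public void run() { /* periodic fetch loop */ }
    }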
@@ -40,6 +40,19 @@ public class AESEngine {
      * @param length how much data to encrypt
      */
     public void encrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int length) {
+        encrypt(payload, payloadIndex, out, outIndex, sessionKey, iv, 0, length);
+    }
+
+    /** Encrypt the payload with the session key
+     * @param payload data to be encrypted
+     * @param payloadIndex index into the payload to start encrypting
+     * @param out where to store the result
+     * @param outIndex where in out to start writing
+     * @param sessionKey private esession key to encrypt to
+     * @param iv IV for CBC
+     * @param length how much data to encrypt
+     */
+    public void encrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int ivOffset, int length) {
         System.arraycopy(payload, payloadIndex, out, outIndex, length);
         _log.warn("Warning: AES is disabled");
     }
@@ -120,6 +133,19 @@ public class AESEngine {
      * @param length how much data to decrypt
      */
     public void decrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int length) {
+        decrypt(payload, payloadIndex, out, outIndex, sessionKey, iv, 0, length);
+    }
+
+    /** Decrypt the data with the session key
+     * @param payload data to be decrypted
+     * @param payloadIndex index into the payload to start decrypting
+     * @param out where to store the cleartext
+     * @param outIndex where in out to start writing
+     * @param sessionKey private session key to decrypt to
+     * @param iv IV for CBC
+     * @param length how much data to decrypt
+     */
+    public void decrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int ivOffset, int length) {
         System.arraycopy(payload, payloadIndex, out, outIndex, length);
         _log.warn("Warning: AES is disabled");
     }
@@ -12,8 +12,10 @@ package net.i2p.crypto;
 import java.security.InvalidKeyException;
 
 import net.i2p.I2PAppContext;
+import net.i2p.data.ByteArray;
 import net.i2p.data.DataHelper;
 import net.i2p.data.SessionKey;
+import net.i2p.util.ByteCache;
 import net.i2p.util.Log;
 
 /**
@@ -31,14 +33,20 @@ public class CryptixAESEngine extends AESEngine {
     private final static byte FAKE_KEY = 0x2A;
     private CryptixAESKeyCache _cache;
 
+    private static final ByteCache _prevCache = ByteCache.getInstance(16, 16);
+
     public CryptixAESEngine(I2PAppContext context) {
         super(context);
         _log = context.logManager().getLog(CryptixAESEngine.class);
         _cache = new CryptixAESKeyCache();
     }
 
     public void encrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int length) {
-        if ( (payload == null) || (out == null) || (sessionKey == null) || (iv == null) || (iv.length != 16) )
+        encrypt(payload, payloadIndex, out, outIndex, sessionKey, iv, 0, length);
+    }
+
+    public void encrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int ivOffset, int length) {
+        if ( (payload == null) || (out == null) || (sessionKey == null) || (iv == null) )
             throw new NullPointerException("invalid args to aes");
         if (payload.length < payloadIndex + length)
             throw new IllegalArgumentException("Payload is too short");
@@ -57,7 +65,7 @@ public class CryptixAESEngine extends AESEngine {
 
         int numblock = length / 16;
 
-        DataHelper.xor(iv, 0, payload, payloadIndex, out, outIndex, 16);
+        DataHelper.xor(iv, ivOffset, payload, payloadIndex, out, outIndex, 16);
         encryptBlock(out, outIndex, sessionKey, out, outIndex);
         for (int x = 1; x < numblock; x++) {
             DataHelper.xor(out, outIndex + (x-1) * 16, payload, payloadIndex + x * 16, out, outIndex + x * 16, 16);
@@ -66,8 +74,10 @@ public class CryptixAESEngine extends AESEngine {
     }
 
     public void decrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int length) {
-        if ((iv== null) || (payload == null) || (payload.length <= 0) || (sessionKey == null)
-            || (iv.length != 16) )
+        decrypt(payload, payloadIndex, out, outIndex, sessionKey, iv, 0, length);
+    }
+    public void decrypt(byte payload[], int payloadIndex, byte out[], int outIndex, SessionKey sessionKey, byte iv[], int ivOffset, int length) {
+        if ((iv== null) || (payload == null) || (payload.length <= 0) || (sessionKey == null) )
             throw new IllegalArgumentException("bad setup");
         else if (out == null)
             throw new IllegalArgumentException("out is null");
@@ -84,12 +94,32 @@ public class CryptixAESEngine extends AESEngine {
         int numblock = length / 16;
         if (length % 16 != 0) numblock++;
 
+        ByteArray prevA = _prevCache.acquire();
+        byte prev[] = prevA.getData();
+        ByteArray curA = _prevCache.acquire();
+        byte cur[] = curA.getData();
+        System.arraycopy(iv, ivOffset, prev, 0, 16);
+
+        for (int x = 0; x < numblock; x++) {
+            System.arraycopy(payload, payloadIndex + (x * 16), cur, 0, 16);
+            decryptBlock(payload, payloadIndex + (x * 16), sessionKey, out, outIndex + (x * 16));
+            DataHelper.xor(out, outIndex + x * 16, prev, 0, out, outIndex + x * 16, 16);
+            iv = prev; // just use IV to switch 'em around
+            prev = cur;
+            cur = iv;
+        }
+
+        /*
         decryptBlock(payload, payloadIndex, sessionKey, out, outIndex);
         DataHelper.xor(out, outIndex, iv, 0, out, outIndex, 16);
         for (int x = 1; x < numblock; x++) {
             decryptBlock(payload, payloadIndex + (x * 16), sessionKey, out, outIndex + (x * 16));
             DataHelper.xor(out, outIndex + x * 16, payload, payloadIndex + (x - 1) * 16, out, outIndex + x * 16, 16);
         }
+        */
+
+        _prevCache.release(prevA);
+        _prevCache.release(curA);
     }
 
     public final void encryptBlock(byte payload[], int inIndex, SessionKey sessionKey, byte out[], int outIndex) {
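This rewrite is what the commit message calls the "minor AES update to allow in-place decryption": CBC recovers plaintext block x by XORing the decrypted block against ciphertext block x-1, so when out and payload are the same array the old loop would have already overwritten the block it still needed. Copying each ciphertext block into a scratch buffer before decrypting it, then swapping the two buffers, keeps the needed block alive. A self-contained sketch of the same technique (hypothetical names, any 16-byte block decryptor):

    import java.util.Arrays;

    // Sketch of in-place CBC decryption: the prev/cur scratch buffers
    // preserve the ciphertext that the XOR needs after it is overwritten.
    public class CbcInPlace {
        interface BlockCipher {
            void decryptBlock(byte[] in, int inOff, byte[] out, int outOff);
        }

        static void decryptCbcInPlace(BlockCipher c, byte[] buf, int off, int len, byte[] iv) {
            byte[] prev = Arrays.copyOf(iv, 16);  // ciphertext block x-1 (starts as the IV)
            byte[] cur = new byte[16];            // scratch copy of ciphertext block x
            for (int x = 0; x < len / 16; x++) {
                int p = off + x * 16;
                // save the ciphertext before overwriting it with plaintext;
                // block x+1 must be XORed against it
                System.arraycopy(buf, p, cur, 0, 16);
                c.decryptBlock(buf, p, buf, p);
                for (int i = 0; i < 16; i++)
                    buf[p + i] ^= prev[i];
                byte[] t = prev; prev = cur; cur = t;  // swap scratch buffers
            }
        }
    }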
@@ -63,11 +63,20 @@ public class HMACSHA256Generator {
      * Calculate the HMAC of the data with the given key
      */
     public Hash calculate(SessionKey key, byte data[]) {
+        if ((key == null) || (key.getData() == null) || (data == null))
+            throw new NullPointerException("Null arguments for HMAC");
+        return calculate(key, data, 0, data.length);
+    }
+
+    /**
+     * Calculate the HMAC of the data with the given key
+     */
+    public Hash calculate(SessionKey key, byte data[], int offset, int length) {
         if ((key == null) || (key.getData() == null) || (data == null))
             throw new NullPointerException("Null arguments for HMAC");
 
-        Buffer buf = new Buffer(data.length);
-        calculate(key, data, buf);
+        Buffer buf = new Buffer(length);
+        calculate(key, data, offset, length, buf);
         Hash rv = new Hash(buf.rv);
         buf.releaseCached();
         return rv;
@@ -77,10 +86,17 @@ public class HMACSHA256Generator {
      * Calculate the HMAC of the data with the given key
      */
     public void calculate(SessionKey key, byte data[], Buffer buf) {
+        calculate(key, data, 0, data.length, buf);
+    }
+
+    /**
+     * Calculate the HMAC of the data with the given key
+     */
+    public void calculate(SessionKey key, byte data[], int offset, int length, Buffer buf) {
         // inner hash
         padKey(key.getData(), _IPAD, buf.padded);
         System.arraycopy(buf.padded, 0, buf.innerBuf, 0, PAD_LENGTH);
-        System.arraycopy(data, 0, buf.innerBuf, PAD_LENGTH, data.length);
+        System.arraycopy(data, offset, buf.innerBuf, PAD_LENGTH, length);
 
         Hash h = _context.sha().calculateHash(buf.innerBuf, buf.innerEntry);
 
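The new offset/length overloads let callers MAC a sub-range of a buffer without copying the slice out first; the only substantive changes are sizing the buffer by length and starting the inner-hash copy at offset. For comparison, the standard JCE Mac API expresses the same idea directly; this is an illustrative equivalent, not the I2P generator itself:

    import javax.crypto.Mac;
    import javax.crypto.spec.SecretKeySpec;

    // Illustrative JCE equivalent of MACing a sub-range without copying;
    // not the I2P HMACSHA256Generator implementation.
    public class HmacSlice {
        static byte[] hmacOfSlice(byte[] key, byte[] data, int offset, int length) throws Exception {
            Mac mac = Mac.getInstance("HmacSHA256");
            mac.init(new SecretKeySpec(key, "HmacSHA256"));
            mac.update(data, offset, length); // hash the slice in place, no copy
            return mac.doFinal();
        }
    }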
history.txt (13 changed lines)
@@ -1,4 +1,15 @@
-$Id: history.txt,v 1.185 2005/04/01 08:29:27 jrandom Exp $
+$Id: history.txt,v 1.186 2005/04/03 07:50:12 jrandom Exp $
+
+2005-04-05  jrandom
+    * After a successfull netDb search for a leaseSet, republish it to all of
+      the peers we have tried so far who did not give us the key (up to 10),
+      rather than the old K closest (which may include peers who had given us
+      the key)
+    * Don't wait 5 minutes to publish a leaseSet (duh!), and rather than
+      republish it every 5 minutes, republish it every 3.  In addition, always
+      republish as soon as the leaseSet changes (duh^2).
+    * Minor fix for oddball startup race (thanks travis_bickle!)
+    * Minor AES update to allow in-place decryption.
 
 2005-04-03  jrandom
     * EepGet fix for open-ended HTTP fetches (such as the news.xml
@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
  *
  */
 public class RouterVersion {
-    public final static String ID = "$Revision: 1.178 $ $Date: 2005/04/01 08:29:26 $";
+    public final static String ID = "$Revision: 1.179 $ $Date: 2005/04/03 07:50:12 $";
     public final static String VERSION = "0.5.0.5";
-    public final static long BUILD = 3;
+    public final static long BUILD = 4;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION);
         System.out.println("Router ID: " + RouterVersion.ID);
@@ -289,8 +289,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
         }
 
         if (!_finished) {
-            if (_log.shouldLog(Log.ERROR))
-                _log.error("Unable to send to " + _toString + " because we couldn't find their leaseSet");
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Unable to send to " + _toString + " because we couldn't find their leaseSet");
         }
 
         dieFatal();
@@ -65,12 +65,12 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     private PeerSelector _peerSelector;
     private RouterContext _context;
     /**
-     * set of Hash objects of leases we're already managing (via RepublishLeaseSetJob).
+     * Map of Hash to RepublishLeaseSetJob for leases we're already managing.
      * This is added to when we create a new RepublishLeaseSetJob, and the values are
      * removed when the job decides to stop running.
      *
      */
-    private Set _publishingLeaseSets;
+    private Map _publishingLeaseSets;
 
     /**
      * Hash of the key currently being searched for, pointing the SearchJob that
@@ -126,7 +126,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         _log = _context.logManager().getLog(KademliaNetworkDatabaseFacade.class);
         _initialized = false;
         _peerSelector = new PeerSelector(_context);
-        _publishingLeaseSets = new HashSet(8);
+        _publishingLeaseSets = new HashMap(8);
         _lastExploreNew = 0;
         _knownRouters = 0;
         _activeRequests = new HashMap(8);
@@ -440,14 +440,16 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         synchronized (_explicitSendKeys) {
             _explicitSendKeys.add(h);
         }
-        Job j = null;
+        RepublishLeaseSetJob j = null;
         synchronized (_publishingLeaseSets) {
-            boolean isNew = _publishingLeaseSets.add(h);
-            if (isNew)
+            j = (RepublishLeaseSetJob)_publishingLeaseSets.get(h);
+            if (j == null) {
                 j = new RepublishLeaseSetJob(_context, this, h);
+                _publishingLeaseSets.put(h, j);
+            }
         }
-        if (j != null)
+        j.getTiming().setStartAfter(_context.clock().now());
         _context.jobQueue().addJob(j);
     }
 
     void stopPublishing(Hash target) {
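The Set-to-Map change above is what makes "republish as soon as the leaseSet changes" possible: a Set could only say whether a RepublishLeaseSetJob existed, while the Map hands back the existing job so its start time can be reset to now. A sketch of the get-or-create idiom, with hypothetical names:

    import java.util.HashMap;
    import java.util.Map;

    // Get-or-create sketch: keeping the job as the map value gives the
    // caller a handle on the existing job, which a plain Set cannot.
    class Publisher {
        interface Job { void startNow(); }

        private final Map<String, Job> _jobs = new HashMap<String, Job>();

        void publish(String key, Job fresh) {
            Job j;
            synchronized (_jobs) {
                j = _jobs.get(key);
                if (j == null) {
                    j = fresh;
                    _jobs.put(key, j);
                }
            }
            j.startNow(); // reschedule immediately, whether new or existing
        }
    }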
@@ -22,7 +22,7 @@ import net.i2p.util.Log;
  */
 public class RepublishLeaseSetJob extends JobImpl {
     private Log _log;
-    private final static long REPUBLISH_LEASESET_DELAY = 5*60*1000; // 5 mins
+    private final static long REPUBLISH_LEASESET_DELAY = 3*60*1000; // 3 mins
     private final static long REPUBLISH_LEASESET_TIMEOUT = 60*1000;
     private Hash _dest;
     private KademliaNetworkDatabaseFacade _facade;
@@ -32,7 +32,7 @@ public class RepublishLeaseSetJob extends JobImpl {
         _log = ctx.logManager().getLog(RepublishLeaseSetJob.class);
         _facade = facade;
         _dest = destHash;
-        getTiming().setStartAfter(ctx.clock().now()+REPUBLISH_LEASESET_DELAY);
+        //getTiming().setStartAfter(ctx.clock().now()+REPUBLISH_LEASESET_DELAY);
     }
     public String getName() { return "Republish a local leaseSet"; }
     public void runJob() {
@@ -40,23 +40,29 @@ public class RepublishLeaseSetJob extends JobImpl {
         if (getContext().clientManager().isLocal(_dest)) {
             LeaseSet ls = _facade.lookupLeaseSetLocally(_dest);
             if (ls != null) {
-                _log.warn("Client " + _dest + " is local, so we're republishing it");
+                if (_log.shouldLog(Log.INFO))
+                    _log.info("Client " + _dest + " is local, so we're republishing it");
                 if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
-                    _log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
+                    if (_log.shouldLog(Log.WARN))
+                        _log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
                 } else {
                     getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, new OnSuccess(getContext()), new OnFailure(getContext()), REPUBLISH_LEASESET_TIMEOUT));
                 }
             } else {
-                _log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");
+                if (_log.shouldLog(Log.WARN))
+                    _log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");
             }
-            requeue(REPUBLISH_LEASESET_DELAY);
+            long republishDelay = getContext().random().nextLong(2*REPUBLISH_LEASESET_DELAY);
+            requeue(republishDelay);
             return;
         } else {
-            _log.info("Client " + _dest + " is no longer local, so no more republishing their leaseSet");
+            if (_log.shouldLog(Log.INFO))
+                _log.info("Client " + _dest + " is no longer local, so no more republishing their leaseSet");
         }
         _facade.stopPublishing(_dest);
     } catch (RuntimeException re) {
-        _log.error("Uncaught error republishing the leaseSet", re);
+        if (_log.shouldLog(Log.ERROR))
+            _log.error("Uncaught error republishing the leaseSet", re);
         _facade.stopPublishing(_dest);
         throw re;
     }
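The replacement for the fixed requeue(REPUBLISH_LEASESET_DELAY) draws the next delay uniformly from [0, 2*REPUBLISH_LEASESET_DELAY), which keeps the mean interval at the new 3 minutes while spreading individual republishes out so peers do not fire in lockstep. A sketch of the arithmetic, assuming only java.util.Random (which, unlike the I2P context's random source used above, has no bounded nextLong):

    import java.util.Random;

    // Jittered-delay sketch: a uniform draw over [0, 2*DELAY) has mean
    // DELAY, so the average period is unchanged but peers decorrelate.
    class Jitter {
        private static final long REPUBLISH_LEASESET_DELAY = 3*60*1000; // 3 mins

        static long nextDelay(Random rnd) {
            // java.util.Random lacks nextLong(bound); derive one from nextDouble()
            return (long) (rnd.nextDouble() * 2 * REPUBLISH_LEASESET_DELAY);
        }
    }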
@@ -16,10 +16,12 @@ import java.util.Set;
 import net.i2p.data.DataHelper;
 import net.i2p.data.DataStructure;
 import net.i2p.data.Hash;
+import net.i2p.data.LeaseSet;
 import net.i2p.data.RouterInfo;
 import net.i2p.data.TunnelId;
 import net.i2p.data.i2np.DatabaseLookupMessage;
 import net.i2p.data.i2np.DatabaseSearchReplyMessage;
+import net.i2p.data.i2np.DatabaseStoreMessage;
 import net.i2p.router.Job;
 import net.i2p.router.JobImpl;
 import net.i2p.router.RouterContext;
@@ -97,6 +99,7 @@ class SearchJob extends JobImpl {
         getContext().statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
         getContext().statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
         getContext().statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
+        getContext().statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Search (" + getClass().getName() + " for " + key.toBase64(), new Exception("Search enqueued by"));
     }
@@ -586,6 +589,15 @@ class SearchJob extends JobImpl {
         resend();
     }
 
+    /**
+     * After a successful search for a leaseSet, we resend that leaseSet to all
+     * of the peers we tried and failed to query.  This var bounds how many of
+     * those peers will get the data, in case a search had to crawl about
+     * substantially.
+     *
+     */
+    private static final int MAX_LEASE_RESEND = 10;
+
     /**
      * After we get the data we were searching for, rebroadcast it to the peers
      * we would query first if we were to search for it again (healing the network).
@@ -593,12 +605,54 @@ class SearchJob extends JobImpl {
      */
     private void resend() {
         DataStructure ds = _facade.lookupLeaseSetLocally(_state.getTarget());
-        if (ds == null)
+        if (ds == null) {
             ds = _facade.lookupRouterInfoLocally(_state.getTarget());
             if (ds != null)
                 getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _state.getTarget(),
                                                             ds, null, null, RESEND_TIMEOUT,
                                                             _state.getSuccessful()));
+        } else {
+            Set sendTo = _state.getFailed();
+            sendTo.addAll(_state.getPending());
+            int numSent = 0;
+            for (Iterator iter = sendTo.iterator(); iter.hasNext(); ) {
+                Hash peer = (Hash)iter.next();
+                RouterInfo peerInfo = _facade.lookupRouterInfoLocally(peer);
+                if (peerInfo == null) continue;
+                if (resend(peerInfo, (LeaseSet)ds))
+                    numSent++;
+                if (numSent >= MAX_LEASE_RESEND)
+                    break;
+            }
+            getContext().statManager().addRateData("netDb.republishQuantity", numSent, numSent);
+        }
+    }
+
+    /**
+     * Resend the leaseSet to the peer who had previously failed to
+     * provide us with the data when we asked them.
+     */
+    private boolean resend(RouterInfo toPeer, LeaseSet ls) {
+        DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
+        msg.setKey(ls.getDestination().calculateHash());
+        msg.setLeaseSet(ls);
+        msg.setMessageExpiration(getContext().clock().now() + RESEND_TIMEOUT);
+
+        TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();
+
+        if (outTunnel != null) {
+            TunnelId targetTunnelId = null; // not needed
+            Job onSend = null; // not wanted
+
+            if (_log.shouldLog(Log.DEBUG))
+                _log.debug("resending leaseSet out to " + toPeer.getIdentity().getHash() + " through " + outTunnel + ": " + msg);
+            getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0), null, toPeer.getIdentity().getHash());
+            return true;
+        } else {
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("unable to resend a leaseSet - no outbound exploratory tunnels!");
+            return false;
+        }
     }
 
     /**
@@ -267,8 +267,11 @@ public class TunnelPoolManager implements TunnelManagerFacade {
      */
     int allocateBuilds(int wanted) {
         synchronized (this) {
-            if (_outstandingBuilds >= _maxOutstandingBuilds)
-                return 0;
+            if (_outstandingBuilds >= _maxOutstandingBuilds) {
+                // ok, as a failsafe, always let one through
+                _outstandingBuilds++;
+                return 1;
+            }
             if (_outstandingBuilds + wanted < _maxOutstandingBuilds) {
                 _outstandingBuilds += wanted;
                 return wanted;