2005-10-19 jrandom

    * Bugfix for the auto-update code to handle different usage patterns
    * Decreased the addressbook recheck frequency to once every 12 hours
      instead of hourly.
    * Handle dynamically changing the HMAC size (again, unless your nym is
      toad or jrandom, ignore this ;)
    * Cleaned up some synchronization/locking code
Author: jrandom
Date: 2005-10-19 05:15:12 +00:00
Committed by: zzz
Parent: 2d70103f88
Commit: df4b998a6a
9 changed files with 135 additions and 108 deletions

View File

@@ -143,7 +143,7 @@ public class Daemon {
         defaultSettings.put("subscriptions", "subscriptions.txt");
         defaultSettings.put("etags", "etags");
         defaultSettings.put("last_modified", "last_modified");
-        defaultSettings.put("update_delay", "1");
+        defaultSettings.put("update_delay", "12");
         File homeFile = new File(home);
         if (!homeFile.exists()) {
@@ -188,4 +188,4 @@ public class Daemon {
             _instance.notifyAll();
         }
     }
 }
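
The only functional change above is the default for "update_delay": the changelog describes it as moving the addressbook recheck from hourly to every 12 hours, so the value is evidently a number of hours. A minimal sketch of how such a setting could be read and turned into a sleep interval (the property name comes from the diff; the surrounding loop is illustrative, not the actual Daemon code):

    import java.util.Properties;

    public class UpdateDelayExample {
        public static void main(String[] args) {
            Properties settings = new Properties();
            settings.setProperty("update_delay", "12");   // new default from the diff, in hours

            long hours;
            try {
                hours = Long.parseLong(settings.getProperty("update_delay", "12"));
            } catch (NumberFormatException nfe) {
                hours = 12;                               // fall back to the shipped default
            }
            long delayMs = hours * 60L * 60 * 1000;
            System.out.println("recheck subscriptions every " + delayMs + " ms");
            // a real daemon would loop:  update(...); Thread.sleep(delayMs);
        }
    }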

View File

@@ -31,7 +31,9 @@ public class UpdateHandler {
     private static final String SIGNED_UPDATE_FILE = "i2pupdate.sud";
     
-    public UpdateHandler() {}
+    public UpdateHandler() {
+        this(ContextHelper.getContext(null));
+    }
     
     public UpdateHandler(RouterContext ctx) {
         _context = ctx;
         _log = ctx.logManager().getLog(UpdateHandler.class);
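
This is the auto-update bugfix from the changelog: the no-argument constructor no longer leaves the handler uninitialized, it delegates to the RouterContext constructor via a context lookup. A rough sketch of that delegation pattern, with a hypothetical DefaultContext holder standing in for I2P's ContextHelper.getContext(null):

    public class Handler {
        private final Object context;

        // no-arg construction now falls back to a shared default context
        public Handler() {
            this(DefaultContext.get());                 // stand-in for ContextHelper.getContext(null)
        }

        // all real initialization lives in the most specific constructor
        public Handler(Object ctx) {
            if (ctx == null)
                throw new IllegalArgumentException("context required");
            this.context = ctx;
        }

        // hypothetical holder for a process-wide default context
        static final class DefaultContext {
            private static final Object INSTANCE = new Object();
            static Object get() { return INSTANCE; }
        }
    }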

View File

@@ -128,10 +128,13 @@ public class HMACSHA256Generator {
             if (_available.size() > 0)
                 return (HMac)_available.remove(0);
         }
+        // the HMAC is hardcoded to use SHA256 digest size
+        // for backwards compatability. next time we have a backwards
+        // incompatible change, we should update this by removing ", 32"
         if (_useMD5)
-            return new HMac(new MD5Digest());
+            return new HMac(new MD5Digest(), 32);
         else
-            return new HMac(new SHA256Digest());
+            return new HMac(new SHA256Digest(), 32);
     }
     private void release(HMac mac) {
         synchronized (_available) {
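
The new comment pins the MAC to the 32-byte SHA-256 digest size for backwards compatibility even though the size can now vary. As an illustration only (using the standard javax.crypto API rather than I2P's bundled HMac class), this is what computing an HMAC-SHA256 and truncating it to a smaller size looks like:

    import java.util.Arrays;
    import javax.crypto.Mac;
    import javax.crypto.spec.SecretKeySpec;

    public class HmacSizeExample {
        // compute HMAC-SHA256 and keep only the first macLen bytes
        static byte[] hmac(byte[] key, byte[] data, int macLen) throws Exception {
            Mac mac = Mac.getInstance("HmacSHA256");
            mac.init(new SecretKeySpec(key, "HmacSHA256"));
            byte[] full = mac.doFinal(data);    // always 32 bytes for SHA-256
            return Arrays.copyOf(full, macLen); // truncate when a shorter MAC is wanted
        }

        public static void main(String[] args) throws Exception {
            byte[] key = new byte[32];
            byte[] data = "hello".getBytes("UTF-8");
            System.out.println(hmac(key, data, 32).length);  // 32: the compatible default
            System.out.println(hmac(key, data, 16).length);  // 16: a smaller MAC size
        }
    }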

View File

@@ -115,27 +115,29 @@ public class BufferedStatLog implements StatLog {
             int writeStart = -1;
             int writeEnd = -1;
             while (true) {
+                try {
                 synchronized (_events) {
                     if (_eventNext > _lastWrite) {
                         if (_eventNext - _lastWrite < _flushFrequency)
-                            try { _events.wait(30*1000); } catch (InterruptedException ie) {}
+                            _events.wait(30*1000);
                     } else {
                         if (_events.length - 1 - _lastWrite + _eventNext < _flushFrequency)
-                            try { _events.wait(30*1000); } catch (InterruptedException ie) {}
+                            _events.wait(30*1000);
                     }
                     writeStart = (_lastWrite + 1) % _events.length;
                     writeEnd = _eventNext;
                     _lastWrite = (writeEnd == 0 ? _events.length-1 : writeEnd - 1);
                 }
                 if (writeStart != writeEnd) {
                     try {
                         if (_log.shouldLog(Log.DEBUG))
                             _log.debug("writing " + writeStart +"->"+ writeEnd);
                         writeEvents(writeStart, writeEnd);
                     } catch (Exception e) {
                         _log.error("error writing " + writeStart +"->"+ writeEnd, e);
                     }
                 }
+                } catch (InterruptedException ie) {}
             }
         }
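
The change here is part of the synchronization cleanup: instead of a try/catch around each individual _events.wait() call, a single InterruptedException handler now wraps one pass of the writer loop. A small sketch of the same shape on a generic work queue (names are illustrative, not the I2P code):

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class FlushLoopExample implements Runnable {
        private final Deque<String> pending = new ArrayDeque<String>();
        private volatile boolean alive = true;

        public void submit(String line) {
            synchronized (pending) {
                pending.add(line);
                pending.notifyAll();
            }
        }

        public void run() {
            while (alive) {
                try {
                    String next;
                    synchronized (pending) {
                        if (pending.isEmpty())
                            pending.wait(30 * 1000);   // recheck every 30s, like the stat writer
                        next = pending.poll();         // may be null after a bare timeout
                    }
                    if (next != null)
                        flush(next);                   // do the I/O outside the monitor
                } catch (InterruptedException ie) {
                    // one handler per loop pass instead of one per wait()
                }
            }
        }

        private void flush(String line) { System.out.println("flush: " + line); }
    }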

View File

@@ -58,9 +58,14 @@ implements Mac
     public HMac(
         Digest digest)
+    {
+        this(digest, digest.getDigestSize());
+    }
+    public HMac(
+        Digest digest, int sz)
     {
         this.digest = digest;
-        digestSize = digest.getDigestSize();
+        this.digestSize = sz;
     }
 
     public String getAlgorithmName()
@@ -141,7 +146,7 @@ implements Mac
         byte[] out,
         int outOff)
     {
-        byte[] tmp = acquireTmp();
+        byte[] tmp = acquireTmp(digestSize);
         //byte[] tmp = new byte[digestSize];
 
         digest.doFinal(tmp, 0);
@@ -156,23 +161,27 @@ implements Mac
         return len;
     }
 
-    private static ArrayList _tmpBuf = new ArrayList();
-    private static byte[] acquireTmp() {
+    /**
+     * list of buffers - index 0 is the cache for 32 byte arrays, while index 1 is the cache for 16 byte arrays
+     */
+    private static ArrayList _tmpBuf[] = new ArrayList[] { new ArrayList(), new ArrayList() };
+    private static byte[] acquireTmp(int sz) {
         byte rv[] = null;
-        synchronized (_tmpBuf) {
-            if (_tmpBuf.size() > 0)
-                rv = (byte[])_tmpBuf.remove(0);
+        synchronized (_tmpBuf[sz == 32 ? 0 : 1]) {
+            if (_tmpBuf[sz == 32 ? 0 : 1].size() > 0)
+                rv = (byte[])_tmpBuf[sz == 32 ? 0 : 1].remove(0);
         }
         if (rv != null)
             Arrays.fill(rv, (byte)0x0);
         else
-            rv = new byte[32]; // hard coded against SHA256 (should be digestSize)
+            rv = new byte[sz];
         return rv;
     }
     private static void releaseTmp(byte buf[]) {
-        synchronized (_tmpBuf) {
-            if (_tmpBuf.size() < 100)
-                _tmpBuf.add((Object)buf);
+        if (buf == null) return;
+        synchronized (_tmpBuf[buf.length == 32 ? 0 : 1]) {
+            if (_tmpBuf[buf.length == 32 ? 0 : 1].size() < 100)
+                _tmpBuf[buf.length == 32 ? 0 : 1].add((Object)buf);
         }
     }
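
The new Javadoc describes the reworked scratch-buffer cache: one free list for 32-byte buffers and one for 16-byte buffers, chosen by the requested size instead of hardcoding 32. A standalone sketch of the same size-keyed pooling idea (class and method names are illustrative, not the I2P code):

    import java.util.ArrayDeque;
    import java.util.Arrays;
    import java.util.Deque;

    public class SizedBufferPool {
        // two free lists, mirroring the diff's comment: one for 32-byte
        // scratch buffers and one for 16-byte scratch buffers
        private final Deque<byte[]> free32 = new ArrayDeque<byte[]>();
        private final Deque<byte[]> free16 = new ArrayDeque<byte[]>();
        private static final int MAX_CACHED = 100;

        private Deque<byte[]> listFor(int sz) {
            return sz == 32 ? free32 : free16;
        }

        public synchronized byte[] acquire(int sz) {
            byte[] buf = listFor(sz).poll();
            if (buf == null)
                return new byte[sz];
            Arrays.fill(buf, (byte) 0);       // zero recycled buffers before handing them out
            return buf;
        }

        public synchronized void release(byte[] buf) {
            if (buf == null) return;
            Deque<byte[]> list = listFor(buf.length);
            if (list.size() < MAX_CACHED)     // cap the cache like the original (100 entries)
                list.offer(buf);
        }
    }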

View File

@@ -1,4 +1,12 @@
-$Id: history.txt,v 1.299 2005/10/17 19:39:46 jrandom Exp $
+$Id: history.txt,v 1.300 2005/10/17 22:14:01 dust Exp $
+
+2005-10-19  jrandom
+    * Bugfix for the auto-update code to handle different usage patterns
+    * Decreased the addressbook recheck frequency to once every 12 hours
+      instead of hourly.
+    * Handle dynamically changing the HMAC size (again, unless your nym is
+      toad or jrandom, ignore this ;)
+    * Cleaned up some synchronization/locking code
 
 2005-10-17  dust
     * Exchange the remaining URL with EepGet in Sucker.

View File

@@ -1,3 +1,4 @@
 #!/bin/sh
-export I2P=~i2p/i2p
+#export I2P=~i2p/i2p
+export I2P=.
 java -cp $I2P/lib/i2p.jar net.i2p.util.EepGet $*

View File

@@ -135,6 +135,7 @@ public class JobQueue {
         long numReady = 0;
         boolean alreadyExists = false;
+        boolean dropped = false;
         synchronized (_jobLock) {
             if (_readyJobs.contains(job))
                 alreadyExists = true;
@@ -144,34 +145,33 @@ public class JobQueue {
                     alreadyExists = true;
             }
-            _context.statManager().addRateData("jobQueue.readyJobs", numReady, 0);
             if (shouldDrop(job, numReady)) {
-                if (_log.shouldLog(Log.WARN))
-                    _log.warn("Dropping job due to overload!  # ready jobs: "
-                              + numReady + ": job = " + job);
                 job.dropped();
-                _context.statManager().addRateData("jobQueue.droppedJobs", 1, 1);
-                _jobLock.notifyAll();
-                return;
-            }
-            if (!alreadyExists) {
-                if (job.getTiming().getStartAfter() <= _context.clock().now()) {
-                    // don't skew us - its 'start after' its been queued, or later
-                    job.getTiming().setStartAfter(_context.clock().now());
-                    if (job instanceof JobImpl)
-                        ((JobImpl)job).madeReady();
-                    _readyJobs.add(job);
-                    _jobLock.notifyAll();
-                } else {
-                    _timedJobs.add(job);
-                    _jobLock.notifyAll();
-                }
+                dropped = true;
             } else {
-                if (_log.shouldLog(Log.DEBUG))
-                    _log.debug("Not adding already enqueued job " + job.getName());
+                if (!alreadyExists) {
+                    if (job.getTiming().getStartAfter() <= _context.clock().now()) {
+                        // don't skew us - its 'start after' its been queued, or later
+                        job.getTiming().setStartAfter(_context.clock().now());
+                        if (job instanceof JobImpl)
+                            ((JobImpl)job).madeReady();
+                        _readyJobs.add(job);
+                    } else {
+                        _timedJobs.add(job);
+                    }
+                }
             }
+            _jobLock.notifyAll();
         }
+        _context.statManager().addRateData("jobQueue.readyJobs", numReady, 0);
+        if (dropped) {
+            _context.statManager().addRateData("jobQueue.droppedJobs", 1, 1);
+            if (_log.shouldLog(Log.WARN))
+                _log.warn("Dropping job due to overload!  # ready jobs: "
+                          + numReady + ": job = " + job);
+        }
         return;
     }
@@ -329,13 +329,15 @@ public class JobQueue {
      */
     Job getNext() {
         while (_alive) {
+            try {
             synchronized (_jobLock) {
                 if (_readyJobs.size() > 0) {
                     return (Job)_readyJobs.remove(0);
                 } else {
-                    try { _jobLock.wait(); } catch (InterruptedException ie) {}
+                    _jobLock.wait();
                 }
             }
+            } catch (InterruptedException ie) {}
         }
         if (_log.shouldLog(Log.WARN))
             _log.warn("No longer alive, returning null");
@@ -403,50 +405,50 @@ public class JobQueue {
                 long now = _context.clock().now();
                 long timeToWait = -1;
                 ArrayList toAdd = null;
+                try {
                 synchronized (_jobLock) {
                     for (int i = 0; i < _timedJobs.size(); i++) {
                         Job j = (Job)_timedJobs.get(i);
                         // find jobs due to start before now
                         long timeLeft = j.getTiming().getStartAfter() - now;
                         if (timeLeft <= 0) {
                             if (j instanceof JobImpl)
                                 ((JobImpl)j).madeReady();
                             if (toAdd == null) toAdd = new ArrayList(4);
                             toAdd.add(j);
                             _timedJobs.remove(i);
                             i--; // so the index stays consistent
                         } else {
                             if ( (timeToWait <= 0) || (timeLeft < timeToWait) )
                                 timeToWait = timeLeft;
                         }
                     }
                     if (toAdd != null) {
                         if (_log.shouldLog(Log.DEBUG))
                             _log.debug("Not waiting - we have " + toAdd.size() + " newly ready jobs");
                         // rather than addAll, which allocs a byte array rv before adding,
                         // we iterate, since toAdd is usually going to only be 1 or 2 entries
                         // and since readyJobs will often have the space, we can avoid the
                         // extra alloc.  (no, i'm not just being insane - i'm updating this based
                         // on some profiling data ;)
                         for (int i = 0; i < toAdd.size(); i++)
                             _readyJobs.add(toAdd.get(i));
                         _jobLock.notifyAll();
                     } else {
                         if (timeToWait < 0)
                             timeToWait = 30*1000;
                         else if (timeToWait < 10)
                             timeToWait = 10;
                         else if (timeToWait > 10*1000)
                             timeToWait = 10*1000;
                         //if (_log.shouldLog(Log.DEBUG))
                         //    _log.debug("Waiting " + timeToWait + " before rechecking the timed queue");
-                        try {
                         _jobLock.wait(timeToWait);
-                        } catch (InterruptedException ie) {}
                     }
                 } // synchronize (_jobLock)
+                } catch (InterruptedException ie) {}
             } // while (_alive)
         } catch (Throwable t) {
             _context.clock().removeUpdateListener(this);
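
The addJob() rewrite keeps all queue mutation inside a single synchronized block, records only a dropped flag there, and defers the stat updates and the WARN logging until after the lock is released, with one notifyAll() instead of one per branch. A compact sketch of that decide-under-the-lock, report-outside-it shape (generic names, not the JobQueue API):

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.logging.Logger;

    public class EnqueueExample {
        private static final Logger LOG = Logger.getLogger(EnqueueExample.class.getName());
        private final Deque<Runnable> ready = new ArrayDeque<Runnable>();
        private final Object lock = new Object();
        private final int capacity = 1000;

        public void add(Runnable job) {
            boolean dropped = false;
            int queued;
            synchronized (lock) {
                queued = ready.size();
                if (queued >= capacity) {
                    dropped = true;            // just record the decision under the lock
                } else {
                    ready.add(job);
                }
                lock.notifyAll();              // single wake-up point
            }
            // logging and stats happen after the monitor is released
            if (dropped)
                LOG.warning("dropping job under load, ready=" + queued);
        }
    }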

View File

@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
  *
  */
 public class RouterVersion {
-    public final static String ID = "$Revision: 1.270 $ $Date: 2005/10/14 08:48:05 $";
+    public final static String ID = "$Revision: 1.271 $ $Date: 2005/10/17 19:39:46 $";
     public final static String VERSION = "0.6.1.3";
-    public final static long BUILD = 1;
+    public final static long BUILD = 2;
     public static void main(String args[]) {
         System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
         System.out.println("Router ID: " + RouterVersion.ID);