merge of '52a34b607bb02c92ab1dac1ef16a3ce64462e39e'

and 'e6920b673f432050c4d56a74a2ff2074a2959e6a'
This commit is contained in:
dev
2011-01-11 17:51:46 +00:00
11 changed files with 180 additions and 64 deletions

View File

@ -755,7 +755,7 @@
</target>
<target name="findbugs" depends="build2">
<echo message="Starting findbugs, this will take a while..." />
<exec executable="nice">
<exec executable="nice" failonerror="true">
<arg value="findbugs"/>
<arg value="-textui"/>
<arg value="-projectName"/>

View File

@ -23,13 +23,18 @@ public final class SHA256Generator {
return I2PAppContext.getGlobalContext().sha();
}
/** Calculate the SHA-256 hash of the source
/**
* Calculate the SHA-256 hash of the source and cache the result.
* @param source what to hash
* @return hash of the source
*/
public final Hash calculateHash(byte[] source) {
    // Convenience overload: hashes the entire array via the ranged variant below.
    return calculateHash(source, 0, source.length);
}
/**
* Calculate the hash and cache the result.
*/
public final Hash calculateHash(byte[] source, int start, int len) {
Sha256Standalone digest = acquireGnu();
digest.update(source, start, len);
@ -39,6 +44,10 @@ public final class SHA256Generator {
return Hash.create(rv);
}
/**
* Use this if you only need the data, not a Hash object.
* Does not cache.
*/
public final void calculateHash(byte[] source, int start, int len, byte out[], int outOffset) {
Sha256Standalone digest = acquireGnu();
digest.update(source, start, len);

View File

@ -31,6 +31,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
@ -1040,25 +1041,45 @@ public class DataHelper {
}
/**
 * Sort based on the Hash of the DataStructure.
 * Warning - relatively slow.
 *
 * WARNING - this sort order must be consistent network-wide, so while the order is
 * arbitrary, it cannot be changed.
 * Why? Just because it has to be consistent so signing will work.
 * How to spec as returning the same type as the param?
 * DEPRECATED - Only used by RouterInfo.
 *
 * @param dataStructures may be null; an empty list is returned if so
 * @return a newly allocated list, sorted by the Base64 of each structure's Hash
 */
public static List<? extends DataStructure> sortStructures(Collection<? extends DataStructure> dataStructures) {
    if (dataStructures == null) return Collections.emptyList();

    // This used to sort on Hash.toString(), which is insane, since a change to toString()
    // would break the whole network. Now use Hash.toBase64().
    // Note that the Base64 sort order is NOT the same as the raw byte sort order,
    // despite what you may read elsewhere.
    ArrayList<DataStructure> rv = new ArrayList<DataStructure>(dataStructures);
    Collections.sort(rv, new DataStructureComparator());
    return rv;
}
/**
 * Orders structures by the Base64 encoding of their Hash.
 * This ordering is part of the network protocol (see sortStructures() comments)
 * and must never be changed.
 * @since 0.8.3
 */
private static class DataStructureComparator implements Comparator<DataStructure> {
    public int compare(DataStructure l, DataStructure r) {
        String left = l.calculateHash().toBase64();
        String right = r.calculateHash().toBase64();
        return left.compareTo(right);
    }
}
/**
* NOTE: formatDuration2() recommended in most cases for readability
*/

View File

@ -131,10 +131,13 @@ public class RouterAddress extends DataStructureImpl {
&& DataHelper.eq(_transportStyle, addr.getTransportStyle());
}
/**
 * Just use style and cost for speed (expiration is always null).
 * If we add multiple addresses of the same style, this may need to be changed.
 */
@Override
public int hashCode() {
    return DataHelper.hashCode(_transportStyle) ^ _cost;
}
/**

View File

@ -14,6 +14,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
import java.util.Iterator;
@ -39,7 +40,8 @@ public class RouterInfo extends DatabaseEntry {
private RouterIdentity _identity;
private volatile long _published;
private final Set<RouterAddress> _addresses;
private final Set<Hash> _peers;
/** may be null to save memory, no longer final */
private Set<Hash> _peers;
private /* FIXME final FIXME */ Properties _options;
private volatile boolean _validated;
private volatile boolean _isValid;
@ -47,6 +49,10 @@ public class RouterInfo extends DatabaseEntry {
private volatile byte _byteified[];
private volatile int _hashCode;
private volatile boolean _hashCodeInitialized;
/** should we cache the byte and string versions _byteified ? **/
private boolean _shouldCache;
/** maybe we should check if we are floodfill? */
private static final boolean CACHE_ALL = Runtime.getRuntime().maxMemory() > 128*1024*1024l;
public static final String PROP_NETWORK_ID = "netId";
public static final String PROP_CAPABILITIES = "caps";
@ -58,7 +64,6 @@ public class RouterInfo extends DatabaseEntry {
/**
 * Create an empty RouterInfo.
 * Note: _peers is intentionally left null (lazily allocated) to save memory;
 * getPeers() returns an empty set in that case.
 */
public RouterInfo() {
    _addresses = new HashSet<RouterAddress>(2);
    _options = new OrderedProperties();
}
@ -70,6 +75,7 @@ public class RouterInfo extends DatabaseEntry {
setPeers(old.getPeers());
setOptions(old.getOptions());
setSignature(old.getSignature());
// copy over _byteified?
}
public long getDate() {
@ -105,6 +111,11 @@ public class RouterInfo extends DatabaseEntry {
public void setIdentity(RouterIdentity ident) {
_identity = ident;
resetCache();
// We only want to cache the bytes for our own RI, which is frequently written.
// To cache for all RIs doubles the RI memory usage.
// setIdentity() is only called when we are creating our own RI.
// Otherwise, the data is populated with readBytes().
_shouldCache = true;
}
/**
@ -159,6 +170,8 @@ public class RouterInfo extends DatabaseEntry {
* @deprecated Implemented here but unused elsewhere
*/
/**
 * The peer hashes included in this RouterInfo, if any.
 * @return never null; an immutable empty set when no peers are set
 * @deprecated Implemented here but unused elsewhere
 */
public Set<Hash> getPeers() {
    if (_peers == null)
        return Collections.emptySet();
    return _peers;
}
@ -169,9 +182,15 @@ public class RouterInfo extends DatabaseEntry {
* @deprecated Implemented here but unused elsewhere
*/
/**
 * Set the peer hashes for this RouterInfo.
 * Stores null internally for a null or empty input to save memory.
 * @param peers may be null or empty
 * @deprecated Implemented here but unused elsewhere
 */
public void setPeers(Set<Hash> peers) {
    if (peers == null || peers.isEmpty()) {
        _peers = null;
        return;
    }
    if (_peers == null)
        _peers = new HashSet<Hash>(2);
    synchronized (_peers) {
        _peers.clear();
        _peers.addAll(peers);
    }
    resetCache();
}
@ -223,7 +242,6 @@ public class RouterInfo extends DatabaseEntry {
if (_byteified != null) return _byteified;
if (_identity == null) throw new DataFormatException("Router identity isn't set? wtf!");
if (_addresses == null) throw new DataFormatException("Router addressess isn't set? wtf!");
if (_peers == null) throw new DataFormatException("Router peers isn't set? wtf!");
if (_options == null) throw new DataFormatException("Router options isn't set? wtf!");
long before = Clock.getInstance().now();
@ -239,6 +257,9 @@ public class RouterInfo extends DatabaseEntry {
DataHelper.writeLong(out, 1, sz);
Collection<RouterAddress> addresses = _addresses;
if (sz > 1)
// WARNING this sort algorithm cannot be changed, as it must be consistent
// network-wide. The signature is not checked at readin time, but only
// later, and the addresses are stored in a Set, not a List.
addresses = (Collection<RouterAddress>) DataHelper.sortStructures(addresses);
for (RouterAddress addr : addresses) {
addr.writeBytes(out);
@ -248,12 +269,14 @@ public class RouterInfo extends DatabaseEntry {
// answer: they're always empty... they're a placeholder for one particular
// method of trusted links, which isn't implemented in the router
// at the moment, and may not be later.
// fixme to reduce objects - allow _peers == null
int psz = _peers.size();
int psz = _peers == null ? 0 : _peers.size();
DataHelper.writeLong(out, 1, psz);
if (psz > 0) {
Collection<Hash> peers = _peers;
if (psz > 1)
// WARNING this sort algorithm cannot be changed, as it must be consistent
// network-wide. The signature is not checked at readin time, but only
// later, and the hashes are stored in a Set, not a List.
peers = (Collection<Hash>) DataHelper.sortStructures(peers);
for (Hash peerHash : peers) {
peerHash.writeBytes(out);
@ -266,7 +289,8 @@ public class RouterInfo extends DatabaseEntry {
byte data[] = out.toByteArray();
long after = Clock.getInstance().now();
_log.debug("getBytes() took " + (after - before) + "ms");
_byteified = data;
if (CACHE_ALL || _shouldCache)
_byteified = data;
return data;
}
@ -466,10 +490,15 @@ public class RouterInfo extends DatabaseEntry {
_addresses.add(address);
}
int numPeers = (int) DataHelper.readLong(in, 1);
for (int i = 0; i < numPeers; i++) {
Hash peerIdentityHash = new Hash();
peerIdentityHash.readBytes(in);
_peers.add(peerIdentityHash);
if (numPeers == 0) {
_peers = null;
} else {
_peers = new HashSet(numPeers);
for (int i = 0; i < numPeers; i++) {
Hash peerIdentityHash = new Hash();
peerIdentityHash.readBytes(in);
_peers.add(peerIdentityHash);
}
}
_options = DataHelper.readProperties(in);
_signature = new Signature();
@ -504,7 +533,7 @@ public class RouterInfo extends DatabaseEntry {
&& _published == info.getPublished()
&& DataHelper.eq(_addresses, info.getAddresses())
&& DataHelper.eq(_options, info.getOptions())
&& DataHelper.eq(_peers, info.getPeers());
&& DataHelper.eq(getPeers(), info.getPeers());
}
@Override
@ -530,7 +559,7 @@ public class RouterInfo extends DatabaseEntry {
RouterAddress addr = (RouterAddress) iter.next();
buf.append("\n\t\tAddress: ").append(addr);
}
Set peers = _peers; // getPeers()
Set peers = getPeers();
buf.append("\n\tPeers: #: ").append(peers.size());
for (Iterator iter = peers.iterator(); iter.hasNext();) {
Hash hash = (Hash) iter.next();

View File

@ -1,3 +1,12 @@
2011-01-09 zzz
* DataHelper: Speed up and annotate sortStructures()
* Data Structures: More caching improvements, don't cache where we shouldn't
* NetDB: Don't rescan netDb directory unless changed,
to reduce Hash cache thrash (backport from test4)
* RouterInfo:
- Don't cache byteified data by default, to save ~1.5 MB
- Don't create empty peers Set, to save ~100KB
2011-01-07 zzz
* Data Structures: More caching
* i2psnark: Improve request tracking to reduce memory usage

View File

@ -20,6 +20,7 @@ import net.i2p.data.DataHelper;
import net.i2p.data.DataStructureImpl;
import net.i2p.data.Hash;
import net.i2p.util.Log;
import net.i2p.util.SimpleByteCache;
/**
* Defines the base message implementation.
@ -72,6 +73,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
* Read the header, then read the rest into buffer, then call
* readMessage in the implemented message type
*
*<pre>
* Specifically:
* 1 byte type (if caller didn't read already, as specified by the type param
* 4 byte ID
@ -79,9 +81,11 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
* 2 byte size
* 1 byte checksum
* size bytes of payload (read by readMessage() in implementation)
*</pre>
*
* @param type the message type or -1 if we should read it here
* @param buffer temp buffer to use
* @return total length of the message
*/
public int readBytes(InputStream in, int type, byte buffer[]) throws I2NPMessageException, IOException {
try {
@ -110,9 +114,11 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
cur += numRead;
}
Hash calc = _context.sha().calculateHash(buffer, 0, size);
byte[] calc = SimpleByteCache.acquire(Hash.HASH_LENGTH);
_context.sha().calculateHash(buffer, 0, size, calc, 0);
//boolean eq = calc.equals(h);
boolean eq = DataHelper.eq(checksum, 0, calc.getData(), 0, CHECKSUM_LENGTH);
boolean eq = DataHelper.eq(checksum, 0, calc, 0, CHECKSUM_LENGTH);
SimpleByteCache.release(calc);
if (!eq)
throw new I2NPMessageException("Hash does not match for " + getClass().getName());
@ -123,11 +129,29 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
//long time = _context.clock().now() - start;
//if (time > 50)
// _context.statManager().addRateData("i2np.readTime", time, time);
return size + Hash.HASH_LENGTH + 1 + 4 + DataHelper.DATE_LENGTH;
return CHECKSUM_LENGTH + 1 + 2 + 4 + DataHelper.DATE_LENGTH + size;
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the message header", dfe);
}
}
/**
* Read the header, then read the rest into buffer, then call
* readMessage in the implemented message type
*
*<pre>
* Specifically:
* 1 byte type (if caller didn't read already, as specified by the type param
* 4 byte ID
* 8 byte expiration
* 2 byte size
* 1 byte checksum
* size bytes of payload (read by readMessage() in implementation)
*</pre>
*
* @param type the message type or -1 if we should read it here
* @return total length of the message
*/
public int readBytes(byte data[], int type, int offset) throws I2NPMessageException, IOException {
int cur = offset;
if (type < 0) {
@ -153,9 +177,10 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
+ " cur=" + cur
+ " wanted=" + size + "]: " + getClass().getName());
Hash calc = _context.sha().calculateHash(data, cur, size);
//boolean eq = calc.equals(h);
boolean eq = DataHelper.eq(hdata, 0, calc.getData(), 0, CHECKSUM_LENGTH);
byte[] calc = SimpleByteCache.acquire(Hash.HASH_LENGTH);
_context.sha().calculateHash(data, cur, size, calc, 0);
boolean eq = DataHelper.eq(hdata, 0, calc, 0, CHECKSUM_LENGTH);
SimpleByteCache.release(calc);
if (!eq)
throw new I2NPMessageException("Hash does not match for " + getClass().getName());
@ -231,7 +256,8 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
try {
int writtenLen = writeMessageBody(buffer, prefixLen);
int payloadLen = writtenLen - prefixLen;
Hash h = _context.sha().calculateHash(buffer, prefixLen, payloadLen);
byte[] h = SimpleByteCache.acquire(Hash.HASH_LENGTH);
_context.sha().calculateHash(buffer, prefixLen, payloadLen, h, 0);
int off = 0;
DataHelper.toLong(buffer, off, 1, getType());
@ -242,7 +268,8 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
off += DataHelper.DATE_LENGTH;
DataHelper.toLong(buffer, off, 2, payloadLen);
off += 2;
System.arraycopy(h.getData(), 0, buffer, off, CHECKSUM_LENGTH);
System.arraycopy(h, 0, buffer, off, CHECKSUM_LENGTH);
SimpleByteCache.release(h);
//long time = _context.clock().now() - start;
//if (time > 50)

View File

@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 9;
public final static long BUILD = 10;
/** for example "-test" */
public final static String EXTRA = "";

View File

@ -247,7 +247,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_enforceNetId = DEFAULT_ENFORCE_NETID;
_kb = new KBucketSet(_context, ri.getIdentity().getHash());
_ds = new PersistentDataStore(_context, dbDir, this);
try {
_ds = new PersistentDataStore(_context, dbDir, this);
} catch (IOException ioe) {
throw new RuntimeException("Unable to initialize netdb storage", ioe);
}
//_ds = new TransientDataStore();
// _exploreKeys = new HashSet(64);
_dbDir = dbDir;

View File

@ -39,19 +39,22 @@ import net.i2p.util.SecureFileOutputStream;
*
*/
class PersistentDataStore extends TransientDataStore {
private Log _log;
private String _dbDir;
private KademliaNetworkDatabaseFacade _facade;
private Writer _writer;
private ReadJob _readJob;
private final Log _log;
private final File _dbDir;
private final KademliaNetworkDatabaseFacade _facade;
private final Writer _writer;
private final ReadJob _readJob;
private boolean _initialized;
private final static int READ_DELAY = 60*1000;
public PersistentDataStore(RouterContext ctx, String dbDir, KademliaNetworkDatabaseFacade facade) {
/**
* @param dbDir relative path
*/
public PersistentDataStore(RouterContext ctx, String dbDir, KademliaNetworkDatabaseFacade facade) throws IOException {
super(ctx);
_log = ctx.logManager().getLog(PersistentDataStore.class);
_dbDir = dbDir;
_dbDir = getDbDir(dbDir);
_facade = facade;
_readJob = new ReadJob();
_context.jobQueue().addJob(_readJob);
@ -79,7 +82,6 @@ class PersistentDataStore extends TransientDataStore {
@Override
public void restart() {
super.restart();
_dbDir = _facade.getDbDir();
}
@Override
@ -160,8 +162,7 @@ class PersistentDataStore extends TransientDataStore {
if (_log.shouldLog(Log.INFO))
_log.info("Removing key " + _key /* , getAddedBy() */);
try {
File dbDir = getDbDir();
removeFile(_key, dbDir);
removeFile(_key, _dbDir);
} catch (IOException ioe) {
_log.error("Error removing key " + _key, ioe);
}
@ -236,7 +237,10 @@ class PersistentDataStore extends TransientDataStore {
if (key != null) {
if (data != null) {
write(key, data);
// synch with the reader job
synchronized (_dbDir) {
write(key, data);
}
data = null;
}
key = null;
@ -278,7 +282,6 @@ class PersistentDataStore extends TransientDataStore {
File dbFile = null;
try {
String filename = null;
File dbDir = getDbDir();
if (data instanceof LeaseSet)
filename = getLeaseSetName(key);
@ -287,7 +290,7 @@ class PersistentDataStore extends TransientDataStore {
else
throw new IOException("We don't know how to write objects of type " + data.getClass().getName());
dbFile = new File(dbDir, filename);
dbFile = new File(_dbDir, filename);
long dataPublishDate = getPublishDate(data);
if (dbFile.lastModified() < dataPublishDate) {
// our filesystem is out of date, lets replace it
@ -326,14 +329,26 @@ class PersistentDataStore extends TransientDataStore {
/** This is only for manual reseeding? Why bother every 60 sec??? */
private class ReadJob extends JobImpl {
private boolean _alreadyWarned;
private long _lastModified;
public ReadJob() {
super(PersistentDataStore.this._context);
_alreadyWarned = false;
}
/** Job name for the router's job queue. */
public String getName() { return "DB Read Job"; }
/**
 * Rescan the netDb directory for new files, but only when the directory's
 * modification time has advanced, then requeue for another pass.
 */
public void runJob() {
    // Check the directory mod time first to save a lot of object churn
    // from scanning all the file names when nothing has changed.
    long lastMod = _dbDir.lastModified();
    if (lastMod > _lastModified) {
        _lastModified = lastMod;
        _log.info("Rereading new files");
        // synch with the writer job
        synchronized (_dbDir) {
            readFiles();
        }
    }
    requeue(READ_DELAY);
}
@ -343,9 +358,8 @@ class PersistentDataStore extends TransientDataStore {
private void readFiles() {
int routerCount = 0;
try {
File dbDir = getDbDir();
File routerInfoFiles[] = dbDir.listFiles(RouterInfoFilter.getInstance());
File routerInfoFiles[] = _dbDir.listFiles(RouterInfoFilter.getInstance());
if (routerInfoFiles != null) {
routerCount += routerInfoFiles.length;
if (routerInfoFiles.length > 5)
@ -360,9 +374,6 @@ class PersistentDataStore extends TransientDataStore {
}
}
}
} catch (IOException ioe) {
_log.error("Error reading files in the db dir", ioe);
}
if (!_alreadyWarned) {
ReseedChecker.checkReseed(_context, routerCount);
@ -442,8 +453,8 @@ class PersistentDataStore extends TransientDataStore {
}
private File getDbDir() throws IOException {
File f = new SecureDirectory(_context.getRouterDir(), _dbDir);
private File getDbDir(String dbDir) throws IOException {
File f = new SecureDirectory(_context.getRouterDir(), dbDir);
if (!f.exists()) {
boolean created = f.mkdirs();
if (!created)

View File

@ -15,6 +15,7 @@ import net.i2p.data.i2np.I2NPMessageHandler;
import net.i2p.router.RouterContext;
import net.i2p.util.ByteCache;
import net.i2p.util.Log;
import net.i2p.util.SimpleByteCache;
import net.i2p.util.SimpleTimer;
/**
@ -241,19 +242,21 @@ public class FragmentHandler {
if (_log.shouldLog(Log.DEBUG))
_log.debug("endpoint IV: " + Base64.encode(preV, validLength - HopProcessor.IV_LENGTH, HopProcessor.IV_LENGTH));
Hash v = _context.sha().calculateHash(preV, 0, validLength);
byte[] v = SimpleByteCache.acquire(Hash.HASH_LENGTH);
_context.sha().calculateHash(preV, 0, validLength, v, 0);
_validateCache.release(ba);
boolean eq = DataHelper.eq(v.getData(), 0, preprocessed, offset + HopProcessor.IV_LENGTH, 4);
boolean eq = DataHelper.eq(v, 0, preprocessed, offset + HopProcessor.IV_LENGTH, 4);
if (!eq) {
if (_log.shouldLog(Log.WARN)) {
_log.warn("Corrupt tunnel message - verification fails: " + Base64.encode(preprocessed, offset+HopProcessor.IV_LENGTH, 4)
+ " != " + Base64.encode(v.getData(), 0, 4));
+ " != " + Base64.encode(v, 0, 4));
_log.warn("No matching endpoint: # pad bytes: " + (paddingEnd-(HopProcessor.IV_LENGTH+4)-1)
+ " offset=" + offset + " length=" + length + " paddingEnd=" + paddingEnd + ' '
+ Base64.encode(preprocessed, offset, length), new Exception("trace"));
}
}
SimpleByteCache.release(v);
if (eq) {
int excessPadding = paddingEnd - (HopProcessor.IV_LENGTH + 4 + 1);