* NetDB, DatabaseStoreMessage:

- Convert everything from DataStructure to the
        new DatabaseEntry superclass
      - Optimizations made possible by DatabaseEntry
      - Don't rescan netDb directory unless changed
This commit is contained in:
zzz
2011-01-02 14:23:26 +00:00
parent 0eebfbacd7
commit 378490886f
27 changed files with 355 additions and 348 deletions

View File

@ -12,6 +12,7 @@ import java.io.ByteArrayInputStream;
import java.io.IOException; import java.io.IOException;
import net.i2p.I2PAppContext; import net.i2p.I2PAppContext;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataFormatException; import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper; import net.i2p.data.DataHelper;
import net.i2p.data.Hash; import net.i2p.data.Hash;
@ -28,59 +29,39 @@ import net.i2p.data.TunnelId;
public class DatabaseStoreMessage extends I2NPMessageImpl { public class DatabaseStoreMessage extends I2NPMessageImpl {
public final static int MESSAGE_TYPE = 1; public final static int MESSAGE_TYPE = 1;
private Hash _key; private Hash _key;
private int _type; private DatabaseEntry _dbEntry;
private LeaseSet _leaseSet; private byte[] _byteCache;
private RouterInfo _info;
private byte[] _leaseSetCache;
private byte[] _routerInfoCache;
private long _replyToken; private long _replyToken;
private TunnelId _replyTunnel; private TunnelId _replyTunnel;
private Hash _replyGateway; private Hash _replyGateway;
public final static int KEY_TYPE_ROUTERINFO = 0;
public final static int KEY_TYPE_LEASESET = 1;
public DatabaseStoreMessage(I2PAppContext context) { public DatabaseStoreMessage(I2PAppContext context) {
super(context); super(context);
setValueType(-1);
} }
/** /**
* Defines the key in the network database being stored * Defines the key in the network database being stored
* *
*/ */
public Hash getKey() { return _key; } public Hash getKey() {
public void setKey(Hash key) { _key = key; } if (_key != null)
return _key; // receive
/** if (_dbEntry != null)
* Defines the router info value in the network database being stored return _dbEntry.getHash(); // create
* return null;
*/
public RouterInfo getRouterInfo() { return _info; }
public void setRouterInfo(RouterInfo routerInfo) {
_info = routerInfo;
if (_info != null)
setValueType(KEY_TYPE_ROUTERINFO);
} }
/** /**
* Defines the lease set value in the network database being stored * Defines the entry in the network database being stored
*
*/ */
public LeaseSet getLeaseSet() { return _leaseSet; } public DatabaseEntry getEntry() { return _dbEntry; }
public void setLeaseSet(LeaseSet leaseSet) {
_leaseSet = leaseSet;
if (_leaseSet != null)
setValueType(KEY_TYPE_LEASESET);
}
/** /**
* Defines type of key being stored in the network database - * This also sets the key
* either KEY_TYPE_ROUTERINFO or KEY_TYPE_LEASESET
*
*/ */
public int getValueType() { return _type; } public void setEntry(DatabaseEntry entry) {
public void setValueType(int type) { _type = type; } _dbEntry = entry;
}
/** /**
* If a reply is desired, this token specifies the message ID that should * If a reply is desired, this token specifies the message ID that should
@ -90,6 +71,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
* @return positive reply token ID, or 0 if no reply is necessary. * @return positive reply token ID, or 0 if no reply is necessary.
*/ */
public long getReplyToken() { return _replyToken; } public long getReplyToken() { return _replyToken; }
/** /**
* Update the reply token. * Update the reply token.
* *
@ -113,13 +95,10 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message"); if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
int curIndex = offset; int curIndex = offset;
//byte keyData[] = new byte[Hash.HASH_LENGTH];
//System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
_key = Hash.create(data, curIndex); _key = Hash.create(data, curIndex);
curIndex += Hash.HASH_LENGTH; curIndex += Hash.HASH_LENGTH;
//_key = new Hash(keyData);
_type = (int)DataHelper.fromLong(data, curIndex, 1); type = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++; curIndex++;
_replyToken = DataHelper.fromLong(data, curIndex, 4); _replyToken = DataHelper.fromLong(data, curIndex, 4);
@ -131,39 +110,38 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
_replyTunnel = new TunnelId(tunnel); _replyTunnel = new TunnelId(tunnel);
curIndex += 4; curIndex += 4;
//byte gw[] = new byte[Hash.HASH_LENGTH];
//System.arraycopy(data, curIndex, gw, 0, Hash.HASH_LENGTH);
_replyGateway = Hash.create(data, curIndex); _replyGateway = Hash.create(data, curIndex);
curIndex += Hash.HASH_LENGTH; curIndex += Hash.HASH_LENGTH;
//_replyGateway = new Hash(gw);
} else { } else {
_replyTunnel = null; _replyTunnel = null;
_replyGateway = null; _replyGateway = null;
} }
if (_type == KEY_TYPE_LEASESET) { if (type == DatabaseEntry.KEY_TYPE_LEASESET) {
_leaseSet = new LeaseSet(); _dbEntry = new LeaseSet();
try { try {
_leaseSet.readBytes(new ByteArrayInputStream(data, curIndex, data.length-curIndex)); _dbEntry.readBytes(new ByteArrayInputStream(data, curIndex, data.length-curIndex));
} catch (DataFormatException dfe) { } catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the leaseSet", dfe); throw new I2NPMessageException("Error reading the leaseSet", dfe);
} }
} else if (_type == KEY_TYPE_ROUTERINFO) { } else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
_info = new RouterInfo(); _dbEntry = new RouterInfo();
int compressedSize = (int)DataHelper.fromLong(data, curIndex, 2); int compressedSize = (int)DataHelper.fromLong(data, curIndex, 2);
curIndex += 2; curIndex += 2;
try { try {
byte decompressed[] = DataHelper.decompress(data, curIndex, compressedSize); byte decompressed[] = DataHelper.decompress(data, curIndex, compressedSize);
_info.readBytes(new ByteArrayInputStream(decompressed)); _dbEntry.readBytes(new ByteArrayInputStream(decompressed));
} catch (DataFormatException dfe) { } catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the routerInfo", dfe); throw new I2NPMessageException("Error reading the routerInfo", dfe);
} catch (IOException ioe) { } catch (IOException ioe) {
throw new I2NPMessageException("Compressed routerInfo was corrupt", ioe); throw new I2NPMessageException("Compressed routerInfo was corrupt", ioe);
} }
} else { } else {
throw new I2NPMessageException("Invalid type of key read from the structure - " + _type); throw new I2NPMessageException("Invalid type of key read from the structure - " + type);
} }
//if (!key.equals(_dbEntry.getHash()))
// throw new I2NPMessageException("Hash mismatch in DSM");
} }
@ -172,28 +150,28 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
int len = Hash.HASH_LENGTH + 1 + 4; // key+type+replyToken int len = Hash.HASH_LENGTH + 1 + 4; // key+type+replyToken
if (_replyToken > 0) if (_replyToken > 0)
len += 4 + Hash.HASH_LENGTH; // replyTunnel+replyGateway len += 4 + Hash.HASH_LENGTH; // replyTunnel+replyGateway
if (_type == KEY_TYPE_LEASESET) { if (_dbEntry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
_leaseSetCache = _leaseSet.toByteArray(); _byteCache = _dbEntry.toByteArray();
len += _leaseSetCache.length; } else if (_dbEntry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
} else if (_type == KEY_TYPE_ROUTERINFO) { byte uncompressed[] = _dbEntry.toByteArray();
byte uncompressed[] = _info.toByteArray(); _byteCache = DataHelper.compress(uncompressed);
byte compressed[] = DataHelper.compress(uncompressed); len += 2;
_routerInfoCache = compressed;
len += compressed.length + 2;
} }
len += _byteCache.length;
return len; return len;
} }
/** write the message body to the output array, starting at the given index */ /** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException { protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if (_key == null) throw new I2NPMessageException("Invalid key"); if (_dbEntry == null) throw new I2NPMessageException("Missing entry");
if ( (_type != KEY_TYPE_LEASESET) && (_type != KEY_TYPE_ROUTERINFO) ) throw new I2NPMessageException("Invalid key type"); int type = _dbEntry.getType();
if ( (_type == KEY_TYPE_LEASESET) && (_leaseSet == null) ) throw new I2NPMessageException("Missing lease set"); if (type != DatabaseEntry.KEY_TYPE_LEASESET && type != DatabaseEntry.KEY_TYPE_ROUTERINFO)
if ( (_type == KEY_TYPE_ROUTERINFO) && (_info == null) ) throw new I2NPMessageException("Missing router info"); throw new I2NPMessageException("Invalid key type");
System.arraycopy(_key.getData(), 0, out, curIndex, Hash.HASH_LENGTH); // Use the hash of the DatabaseEntry
System.arraycopy(getKey().getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH; curIndex += Hash.HASH_LENGTH;
byte type[] = DataHelper.toLong(1, _type); out[curIndex++] = (byte) type;
out[curIndex++] = type[0];
byte tok[] = DataHelper.toLong(4, _replyToken); byte tok[] = DataHelper.toLong(4, _replyToken);
System.arraycopy(tok, 0, out, curIndex, 4); System.arraycopy(tok, 0, out, curIndex, 4);
curIndex += 4; curIndex += 4;
@ -209,17 +187,14 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
curIndex += Hash.HASH_LENGTH; curIndex += Hash.HASH_LENGTH;
} }
if (_type == KEY_TYPE_LEASESET) { // _byteCache initialized in calculateWrittenLength
// initialized in calculateWrittenLength if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
System.arraycopy(_leaseSetCache, 0, out, curIndex, _leaseSetCache.length); byte len[] = DataHelper.toLong(2, _byteCache.length);
curIndex += _leaseSetCache.length;
} else if (_type == KEY_TYPE_ROUTERINFO) {
byte len[] = DataHelper.toLong(2, _routerInfoCache.length);
out[curIndex++] = len[0]; out[curIndex++] = len[0];
out[curIndex++] = len[1]; out[curIndex++] = len[1];
System.arraycopy(_routerInfoCache, 0, out, curIndex, _routerInfoCache.length);
curIndex += _routerInfoCache.length;
} }
System.arraycopy(_byteCache, 0, out, curIndex, _byteCache.length);
curIndex += _byteCache.length;
return curIndex; return curIndex;
} }
@ -228,9 +203,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
@Override @Override
public int hashCode() { public int hashCode() {
return DataHelper.hashCode(getKey()) + return DataHelper.hashCode(getKey()) +
DataHelper.hashCode(getLeaseSet()) + DataHelper.hashCode(_dbEntry) +
DataHelper.hashCode(getRouterInfo()) +
getValueType() +
(int)getReplyToken() + (int)getReplyToken() +
DataHelper.hashCode(getReplyTunnel()) + DataHelper.hashCode(getReplyTunnel()) +
DataHelper.hashCode(getReplyGateway()); DataHelper.hashCode(getReplyGateway());
@ -241,9 +214,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
if ( (object != null) && (object instanceof DatabaseStoreMessage) ) { if ( (object != null) && (object instanceof DatabaseStoreMessage) ) {
DatabaseStoreMessage msg = (DatabaseStoreMessage)object; DatabaseStoreMessage msg = (DatabaseStoreMessage)object;
return DataHelper.eq(getKey(),msg.getKey()) && return DataHelper.eq(getKey(),msg.getKey()) &&
DataHelper.eq(getLeaseSet(),msg.getLeaseSet()) && DataHelper.eq(_dbEntry,msg.getEntry()) &&
DataHelper.eq(getRouterInfo(),msg.getRouterInfo()) &&
_type == msg.getValueType() &&
getReplyToken() == msg.getReplyToken() && getReplyToken() == msg.getReplyToken() &&
DataHelper.eq(getReplyTunnel(), msg.getReplyTunnel()) && DataHelper.eq(getReplyTunnel(), msg.getReplyTunnel()) &&
DataHelper.eq(getReplyGateway(), msg.getReplyGateway()); DataHelper.eq(getReplyGateway(), msg.getReplyGateway());
@ -259,9 +230,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
buf.append("\n\tExpiration: ").append(getMessageExpiration()); buf.append("\n\tExpiration: ").append(getMessageExpiration());
buf.append("\n\tUnique ID: ").append(getUniqueId()); buf.append("\n\tUnique ID: ").append(getUniqueId());
buf.append("\n\tKey: ").append(getKey()); buf.append("\n\tKey: ").append(getKey());
buf.append("\n\tValue Type: ").append(getValueType()); buf.append("\n\tEntry: ").append(_dbEntry);
buf.append("\n\tRouter Info: ").append(getRouterInfo());
buf.append("\n\tLease Set: ").append(getLeaseSet());
buf.append("\n\tReply token: ").append(getReplyToken()); buf.append("\n\tReply token: ").append(getReplyToken());
buf.append("\n\tReply tunnel: ").append(getReplyTunnel()); buf.append("\n\tReply tunnel: ").append(getReplyTunnel());
buf.append("\n\tReply gateway: ").append(getReplyGateway()); buf.append("\n\tReply gateway: ").append(getReplyGateway());

View File

@ -16,6 +16,7 @@ import java.util.HashSet;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo; import net.i2p.data.RouterInfo;
@ -36,6 +37,7 @@ class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade {
_routers.put(info.getIdentity().getHash(), info); _routers.put(info.getIdentity().getHash(), info);
} }
public DatabaseEntry lookupLocally(Hash key) { return null; }
public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {} public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {}
public LeaseSet lookupLeaseSetLocally(Hash key) { return null; } public LeaseSet lookupLeaseSetLocally(Hash key) { return null; }
public void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) { public void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {

View File

@ -13,6 +13,7 @@ import java.io.Writer;
import java.util.Collections; import java.util.Collections;
import java.util.Set; import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo; import net.i2p.data.RouterInfo;
@ -32,6 +33,11 @@ public abstract class NetworkDatabaseFacade implements Service {
*/ */
public abstract Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore); public abstract Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore);
/**
* @return RouterInfo, LeaseSet, or null
* @since 0.8.3
*/
public abstract DatabaseEntry lookupLocally(Hash key);
public abstract void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs); public abstract void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs);
public abstract LeaseSet lookupLeaseSetLocally(Hash key); public abstract LeaseSet lookupLeaseSetLocally(Hash key);
public abstract void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs); public abstract void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs);

View File

@ -227,9 +227,8 @@ class OutboundClientMessageJobHelper {
clove.setExpiration(expiration); clove.setExpiration(expiration);
clove.setId(ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE)); clove.setId(ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE));
DatabaseStoreMessage msg = new DatabaseStoreMessage(ctx); DatabaseStoreMessage msg = new DatabaseStoreMessage(ctx);
msg.setLeaseSet(replyLeaseSet); msg.setEntry(replyLeaseSet);
msg.setMessageExpiration(expiration); msg.setMessageExpiration(expiration);
msg.setKey(replyLeaseSet.getDestination().calculateHash());
clove.setPayload(msg); clove.setPayload(msg);
clove.setRecipientPublicKey(null); clove.setRecipientPublicKey(null);
clove.setRequestAck(false); clove.setRequestAck(false);

View File

@ -12,7 +12,7 @@ import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.Set; import java.util.Set;
import net.i2p.data.DataStructure; import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.RouterIdentity; import net.i2p.data.RouterIdentity;
@ -227,20 +227,19 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
return routerHashSet.contains(getContext().routerHash()); return routerHashSet.contains(getContext().routerHash());
} }
private void sendData(Hash key, DataStructure data, Hash toPeer, TunnelId replyTunnel) { private void sendData(Hash key, DatabaseEntry data, Hash toPeer, TunnelId replyTunnel) {
if (!key.equals(data.getHash())) {
_log.error("Hash mismatch HDLMJ");
return;
}
if (_log.shouldLog(Log.DEBUG)) if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending data matching key " + key.toBase64() + " to peer " + toPeer.toBase64() _log.debug("Sending data matching key " + key.toBase64() + " to peer " + toPeer.toBase64()
+ " tunnel " + replyTunnel); + " tunnel " + replyTunnel);
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext()); DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(key); if (data.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
if (data instanceof LeaseSet) {
msg.setLeaseSet((LeaseSet)data);
msg.setValueType(DatabaseStoreMessage.KEY_TYPE_LEASESET);
getContext().statManager().addRateData("netDb.lookupsMatchedLeaseSet", 1, 0); getContext().statManager().addRateData("netDb.lookupsMatchedLeaseSet", 1, 0);
} else if (data instanceof RouterInfo) {
msg.setRouterInfo((RouterInfo)data);
msg.setValueType(DatabaseStoreMessage.KEY_TYPE_ROUTERINFO);
} }
msg.setEntry(data);
getContext().statManager().addRateData("netDb.lookupsMatched", 1, 0); getContext().statManager().addRateData("netDb.lookupsMatched", 1, 0);
getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0); getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0);
sendMessage(msg, toPeer, replyTunnel); sendMessage(msg, toPeer, replyTunnel);

View File

@ -10,9 +10,11 @@ package net.i2p.router.networkdb;
import java.util.Date; import java.util.Date;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.RouterIdentity; import net.i2p.data.RouterIdentity;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseStoreMessage; import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.DeliveryStatusMessage; import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.router.JobImpl; import net.i2p.router.JobImpl;
@ -59,16 +61,17 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
String invalidMessage = null; String invalidMessage = null;
boolean wasNew = false; boolean wasNew = false;
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { DatabaseEntry entry = _message.getEntry();
if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
getContext().statManager().addRateData("netDb.storeLeaseSetHandled", 1, 0); getContext().statManager().addRateData("netDb.storeLeaseSetHandled", 1, 0);
try { try {
LeaseSet ls = _message.getLeaseSet(); LeaseSet ls = (LeaseSet) entry;
// mark it as something we received, so we'll answer queries // mark it as something we received, so we'll answer queries
// for it. this flag does NOT get set on entries that we // for it. this flag does NOT get set on entries that we
// receive in response to our own lookups. // receive in response to our own lookups.
ls.setReceivedAsPublished(true); ls.setReceivedAsPublished(true);
LeaseSet match = getContext().netDb().store(_message.getKey(), _message.getLeaseSet()); LeaseSet match = getContext().netDb().store(_message.getKey(), ls);
if (match == null) { if (match == null) {
wasNew = true; wasNew = true;
} else { } else {
@ -78,13 +81,14 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
} catch (IllegalArgumentException iae) { } catch (IllegalArgumentException iae) {
invalidMessage = iae.getMessage(); invalidMessage = iae.getMessage();
} }
} else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) { } else if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
RouterInfo ri = (RouterInfo) entry;
getContext().statManager().addRateData("netDb.storeRouterInfoHandled", 1, 0); getContext().statManager().addRateData("netDb.storeRouterInfoHandled", 1, 0);
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of router " + _message.getKey() + " with publishDate of " _log.info("Handling dbStore of router " + _message.getKey() + " with publishDate of "
+ new Date(_message.getRouterInfo().getPublished())); + new Date(ri.getPublished()));
try { try {
Object match = getContext().netDb().store(_message.getKey(), _message.getRouterInfo()); Object match = getContext().netDb().store(_message.getKey(), ri);
wasNew = (null == match); wasNew = (null == match);
getContext().profileManager().heardAbout(_message.getKey()); getContext().profileManager().heardAbout(_message.getKey());
} catch (IllegalArgumentException iae) { } catch (IllegalArgumentException iae) {
@ -92,7 +96,7 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
} }
} else { } else {
if (_log.shouldLog(Log.ERROR)) if (_log.shouldLog(Log.ERROR))
_log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType() _log.error("Invalid DatabaseStoreMessage data type - " + entry.getType()
+ ": " + _message); + ": " + _message);
} }

View File

@ -8,21 +8,27 @@ package net.i2p.router.networkdb.kademlia;
* *
*/ */
import java.util.Collection;
import java.util.Map;
import java.util.Set; import java.util.Set;
import net.i2p.data.DataStructure; import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
public interface DataStore { public interface DataStore {
public boolean isInitialized(); public boolean isInitialized();
public boolean isKnown(Hash key); public boolean isKnown(Hash key);
public DataStructure get(Hash key); public DatabaseEntry get(Hash key);
public DataStructure get(Hash key, boolean persist); public DatabaseEntry get(Hash key, boolean persist);
public boolean put(Hash key, DataStructure data); public boolean put(Hash key, DatabaseEntry data);
public boolean put(Hash key, DataStructure data, boolean persist); public boolean put(Hash key, DatabaseEntry data, boolean persist);
public DataStructure remove(Hash key); public DatabaseEntry remove(Hash key);
public DataStructure remove(Hash key, boolean persist); public DatabaseEntry remove(Hash key, boolean persist);
public Set<Hash> getKeys(); public Set<Hash> getKeys();
/** @since 0.8.3 */
public Collection<DatabaseEntry> getEntries();
/** @since 0.8.3 */
public Set<Map.Entry<Hash, DatabaseEntry>> getMapEntries();
public void stop(); public void stop();
public void restart(); public void restart();
public void rescan(); public void rescan();

View File

@ -12,6 +12,7 @@ import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.Set; import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.router.JobImpl; import net.i2p.router.JobImpl;
@ -61,8 +62,8 @@ class ExpireLeasesJob extends JobImpl {
Set toExpire = new HashSet(128); Set toExpire = new HashSet(128);
for (Iterator iter = keys.iterator(); iter.hasNext(); ) { for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next(); Hash key = (Hash)iter.next();
Object obj = _facade.getDataStore().get(key); DatabaseEntry obj = _facade.getDataStore().get(key);
if (obj instanceof LeaseSet) { if (obj.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
LeaseSet ls = (LeaseSet)obj; LeaseSet ls = (LeaseSet)obj;
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR))
toExpire.add(key); toExpire.add(key);

View File

@ -1,5 +1,8 @@
package net.i2p.router.networkdb.kademlia; package net.i2p.router.networkdb.kademlia;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseSearchReplyMessage; import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage; import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage; import net.i2p.data.i2np.I2NPMessage;
@ -61,14 +64,15 @@ class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
// We do it here first to make sure it is in the DB before // We do it here first to make sure it is in the DB before
// runJob() and search.success() is called??? // runJob() and search.success() is called???
// Should we just pass the DataStructure directly back to somebody? // Should we just pass the DataStructure directly back to somebody?
if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
// Since HFDSMJ wants to setReceivedAsPublished(), we have to // Since HFDSMJ wants to setReceivedAsPublished(), we have to
// set a flag saying this was really the result of a query, // set a flag saying this was really the result of a query,
// so don't do that. // so don't do that.
dsm.getLeaseSet().setReceivedAsReply(); LeaseSet ls = (LeaseSet) dsm.getEntry();
getContext().netDb().store(dsm.getKey(), dsm.getLeaseSet()); ls.setReceivedAsReply();
getContext().netDb().store(dsm.getKey(), ls);
} else { } else {
getContext().netDb().store(dsm.getKey(), dsm.getRouterInfo()); getContext().netDb().store(dsm.getKey(), (RouterInfo) dsm.getEntry());
} }
} catch (IllegalArgumentException iae) { } catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.WARN)) if (_log.shouldLog(Log.WARN))

View File

@ -182,8 +182,7 @@ public class FloodSearchJob extends JobImpl {
_search = job; _search = job;
} }
public void runJob() { public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) || if (getContext().netDb().lookupLocally(_search.getKey()) != null) {
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
_search.success(); _search.success();
} else { } else {
int remaining = _search.getLookupsRemaining(); int remaining = _search.getLookupsRemaining();

View File

@ -7,8 +7,8 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataFormatException; import net.i2p.data.DataFormatException;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo; import net.i2p.data.RouterInfo;
@ -93,11 +93,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
} }
@Override @Override
public void sendStore(Hash key, DataStructure ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) { public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
// if we are a part of the floodfill netDb, don't send out our own leaseSets as part // if we are a part of the floodfill netDb, don't send out our own leaseSets as part
// of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out. // of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
// perhaps statistically adjust this so we are the source every 1/N times... or something. // perhaps statistically adjust this so we are the source every 1/N times... or something.
if (floodfillEnabled() && (ds instanceof RouterInfo)) { if (floodfillEnabled() && (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
flood(ds); flood(ds);
if (onSuccess != null) if (onSuccess != null)
_context.jobQueue().addJob(onSuccess); _context.jobQueue().addJob(onSuccess);
@ -129,12 +129,8 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
* We do this to implement Kademlia within the floodfills, i.e. * We do this to implement Kademlia within the floodfills, i.e.
* we flood to those closest to the key. * we flood to those closest to the key.
*/ */
public void flood(DataStructure ds) { public void flood(DatabaseEntry ds) {
Hash key; Hash key = ds.getHash();
if (ds instanceof LeaseSet)
key = ((LeaseSet)ds).getDestination().calculateHash();
else
key = ((RouterInfo)ds).getIdentity().calculateHash();
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key); Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector(); FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector();
List peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets()); List peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
@ -151,12 +147,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
if (peer.equals(_context.routerHash())) if (peer.equals(_context.routerHash()))
continue; continue;
DatabaseStoreMessage msg = new DatabaseStoreMessage(_context); DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
if (ds instanceof LeaseSet) { msg.setEntry(ds);
msg.setLeaseSet((LeaseSet)ds);
} else {
msg.setRouterInfo((RouterInfo)ds);
}
msg.setKey(key);
msg.setReplyGateway(null); msg.setReplyGateway(null);
msg.setReplyToken(0); msg.setReplyToken(0);
msg.setReplyTunnel(null); msg.setReplyTunnel(null);
@ -242,15 +233,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
List<RouterInfo> rv = new ArrayList(); List<RouterInfo> rv = new ArrayList();
DataStore ds = getDataStore(); DataStore ds = getDataStore();
if (ds != null) { if (ds != null) {
Set keys = ds.getKeys(); for (DatabaseEntry o : ds.getEntries()) {
if (keys != null) { if (o.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Object o = ds.get((Hash)iter.next());
if (o instanceof RouterInfo)
rv.add((RouterInfo)o); rv.add((RouterInfo)o);
} }
} }
}
return rv; return rv;
} }

View File

@ -12,7 +12,7 @@ import java.util.Iterator;
import java.util.NoSuchElementException; import java.util.NoSuchElementException;
import java.util.Set; import java.util.Set;
import net.i2p.data.DataStructure; import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo; import net.i2p.data.RouterInfo;
@ -30,7 +30,7 @@ class FloodfillStoreJob extends StoreJob {
* Send a data structure to the floodfills * Send a data structure to the floodfills
* *
*/ */
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) { public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DatabaseEntry data, Job onSuccess, Job onFailure, long timeoutMs) {
this(context, facade, key, data, onSuccess, onFailure, timeoutMs, null); this(context, facade, key, data, onSuccess, onFailure, timeoutMs, null);
} }
@ -38,7 +38,7 @@ class FloodfillStoreJob extends StoreJob {
* @param toSkip set of peer hashes of people we dont want to send the data to (e.g. we * @param toSkip set of peer hashes of people we dont want to send the data to (e.g. we
* already know they have it). This can be null. * already know they have it). This can be null.
*/ */
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) { public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DatabaseEntry data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
super(context, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip); super(context, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip);
_facade = facade; _facade = facade;
} }
@ -63,15 +63,12 @@ class FloodfillStoreJob extends StoreJob {
} }
// Get the time stamp from the data we sent, so the Verify job can meke sure that // Get the time stamp from the data we sent, so the Verify job can meke sure that
// it finds something stamped with that time or newer. // it finds something stamped with that time or newer.
long published = 0; DatabaseEntry data = _state.getData();
DataStructure data = _state.getData(); boolean isRouterInfo = data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO;
boolean isRouterInfo = data instanceof RouterInfo; long published = data.getDate();
if (isRouterInfo) { if (isRouterInfo) {
published = ((RouterInfo) data).getPublished();
// Temporarily disable // Temporarily disable
return; return;
} else if (data instanceof LeaseSet) {
published = ((LeaseSet) data).getEarliestLeaseDate();
} }
// we should always have exactly one successful entry // we should always have exactly one successful entry
Hash sentTo = null; Hash sentTo = null;

View File

@ -4,7 +4,7 @@ import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
import net.i2p.data.DataStructure; import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.RouterInfo; import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseLookupMessage; import net.i2p.data.i2np.DatabaseLookupMessage;
@ -201,10 +201,7 @@ public class FloodfillVerifyStoreJob extends JobImpl {
// Verify it's as recent as the one we sent // Verify it's as recent as the one we sent
boolean success = false; boolean success = false;
DatabaseStoreMessage dsm = (DatabaseStoreMessage)_message; DatabaseStoreMessage dsm = (DatabaseStoreMessage)_message;
if (_isRouterInfo && dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) success = dsm.getEntry().getDate() >= _published;
success = dsm.getRouterInfo().getPublished() >= _published;
else if ((!_isRouterInfo) && dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
success = dsm.getLeaseSet().getEarliestLeaseDate() >= _published;
if (success) { if (success) {
// store ok, w00t! // store ok, w00t!
getContext().profileManager().dbLookupSuccessful(_target, delay); getContext().profileManager().dbLookupSuccessful(_target, delay);
@ -218,7 +215,7 @@ public class FloodfillVerifyStoreJob extends JobImpl {
if (_log.shouldLog(Log.WARN)) if (_log.shouldLog(Log.WARN))
_log.warn("Verify failed (older) for " + _key); _log.warn("Verify failed (older) for " + _key);
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Rcvd older lease: " + dsm.getLeaseSet()); _log.info("Rcvd older lease: " + dsm.getEntry());
} else if (_message instanceof DatabaseSearchReplyMessage) { } else if (_message instanceof DatabaseSearchReplyMessage) {
// assume 0 old, all new, 0 invalid, 0 dup // assume 0 old, all new, 0 invalid, 0 dup
getContext().profileManager().dbLookupReply(_target, 0, getContext().profileManager().dbLookupReply(_target, 0,
@ -245,11 +242,7 @@ public class FloodfillVerifyStoreJob extends JobImpl {
* So at least we'll try THREE ffs round-robin if things continue to fail... * So at least we'll try THREE ffs round-robin if things continue to fail...
*/ */
private void resend() { private void resend() {
DataStructure ds; DatabaseEntry ds = _facade.lookupLocally(_key);
if (_isRouterInfo)
ds = _facade.lookupRouterInfoLocally(_key);
else
ds = _facade.lookupLeaseSetLocally(_key);
if (ds != null) { if (ds != null) {
Set<Hash> toSkip = new HashSet(2); Set<Hash> toSkip = new HashSet(2);
if (_sentTo != null) if (_sentTo != null)

View File

@ -57,9 +57,7 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLooku
// that would increment the netDb.lookupsHandled and netDb.lookupsMatched stats // that would increment the netDb.lookupsHandled and netDb.lookupsMatched stats
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext()); DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
RouterInfo me = getContext().router().getRouterInfo(); RouterInfo me = getContext().router().getRouterInfo();
msg.setKey(me.getIdentity().getHash()); msg.setEntry(me);
msg.setRouterInfo(me);
msg.setValueType(DatabaseStoreMessage.KEY_TYPE_ROUTERINFO);
sendMessage(msg, toPeer, replyTunnel); sendMessage(msg, toPeer, replyTunnel);
} }
} }

View File

@ -11,6 +11,7 @@ package net.i2p.router.networkdb.kademlia;
import java.util.Date; import java.util.Date;
import java.util.Set; import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.RouterIdentity; import net.i2p.data.RouterIdentity;
@ -55,7 +56,8 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
boolean wasNew = false; boolean wasNew = false;
RouterInfo prevNetDb = null; RouterInfo prevNetDb = null;
Hash key = _message.getKey(); Hash key = _message.getKey();
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { DatabaseEntry entry = _message.getEntry();
if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
getContext().statManager().addRateData("netDb.storeLeaseSetHandled", 1, 0); getContext().statManager().addRateData("netDb.storeLeaseSetHandled", 1, 0);
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of leaseset " + _message); _log.info("Handling dbStore of leaseset " + _message);
@ -75,7 +77,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
throw new IllegalArgumentException("Peer attempted to store local leaseSet: " + throw new IllegalArgumentException("Peer attempted to store local leaseSet: " +
key.toBase64().substring(0, 4)); key.toBase64().substring(0, 4));
} }
LeaseSet ls = _message.getLeaseSet(); LeaseSet ls = (LeaseSet) entry;
//boolean oldrar = ls.getReceivedAsReply(); //boolean oldrar = ls.getReceivedAsReply();
//boolean oldrap = ls.getReceivedAsPublished(); //boolean oldrap = ls.getReceivedAsPublished();
// If this was received as a response to a query, // If this was received as a response to a query,
@ -91,10 +93,10 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
//boolean rap = ls.getReceivedAsPublished(); //boolean rap = ls.getReceivedAsPublished();
//if (_log.shouldLog(Log.INFO)) //if (_log.shouldLog(Log.INFO))
// _log.info("oldrap? " + oldrap + " oldrar? " + oldrar + " newrap? " + rap); // _log.info("oldrap? " + oldrap + " oldrar? " + oldrar + " newrap? " + rap);
LeaseSet match = getContext().netDb().store(key, _message.getLeaseSet()); LeaseSet match = getContext().netDb().store(key, ls);
if (match == null) { if (match == null) {
wasNew = true; wasNew = true;
} else if (match.getEarliestLeaseDate() < _message.getLeaseSet().getEarliestLeaseDate()) { } else if (match.getEarliestLeaseDate() < ls.getEarliestLeaseDate()) {
wasNew = true; wasNew = true;
// If it is in our keyspace and we are talking to it // If it is in our keyspace and we are talking to it
@ -117,11 +119,12 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
} catch (IllegalArgumentException iae) { } catch (IllegalArgumentException iae) {
invalidMessage = iae.getMessage(); invalidMessage = iae.getMessage();
} }
} else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) { } else if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
RouterInfo ri = (RouterInfo) entry;
getContext().statManager().addRateData("netDb.storeRouterInfoHandled", 1, 0); getContext().statManager().addRateData("netDb.storeRouterInfoHandled", 1, 0);
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of router " + key + " with publishDate of " _log.info("Handling dbStore of router " + key + " with publishDate of "
+ new Date(_message.getRouterInfo().getPublished())); + new Date(ri.getPublished()));
try { try {
// Never store our RouterInfo received from somebody else. // Never store our RouterInfo received from somebody else.
// This generally happens from a FloodfillVerifyStoreJob. // This generally happens from a FloodfillVerifyStoreJob.
@ -132,8 +135,8 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// throw rather than return, so that we send the ack below (prevent easy attack) // throw rather than return, so that we send the ack below (prevent easy attack)
throw new IllegalArgumentException("Peer attempted to store our RouterInfo"); throw new IllegalArgumentException("Peer attempted to store our RouterInfo");
} }
prevNetDb = getContext().netDb().store(key, _message.getRouterInfo()); prevNetDb = getContext().netDb().store(key, ri);
wasNew = ((null == prevNetDb) || (prevNetDb.getPublished() < _message.getRouterInfo().getPublished())); wasNew = ((null == prevNetDb) || (prevNetDb.getPublished() < ri.getPublished()));
// Check new routerinfo address against blocklist // Check new routerinfo address against blocklist
if (wasNew) { if (wasNew) {
if (prevNetDb == null) { if (prevNetDb == null) {
@ -143,7 +146,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
_log.warn("Blocklisting new peer " + key); _log.warn("Blocklisting new peer " + key);
} else { } else {
Set oldAddr = prevNetDb.getAddresses(); Set oldAddr = prevNetDb.getAddresses();
Set newAddr = _message.getRouterInfo().getAddresses(); Set newAddr = ri.getAddresses();
if (newAddr != null && (!newAddr.equals(oldAddr)) && if (newAddr != null && (!newAddr.equals(oldAddr)) &&
(!getContext().shitlist().isShitlistedForever(key)) && (!getContext().shitlist().isShitlistedForever(key)) &&
getContext().blocklist().isBlocklisted(key) && getContext().blocklist().isBlocklisted(key) &&
@ -157,7 +160,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
} }
} else { } else {
if (_log.shouldLog(Log.ERROR)) if (_log.shouldLog(Log.ERROR))
_log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType() _log.error("Invalid DatabaseStoreMessage data type - " + entry.getType()
+ ": " + _message); + ": " + _message);
} }
@ -198,12 +201,9 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
return; return;
} }
long floodBegin = System.currentTimeMillis(); long floodBegin = System.currentTimeMillis();
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) _facade.flood(_message.getEntry());
_facade.flood(_message.getLeaseSet());
// ERR: see comment in HandleDatabaseLookupMessageJob regarding hidden mode // ERR: see comment in HandleDatabaseLookupMessageJob regarding hidden mode
//else if (!_message.getRouterInfo().isHidden()) //else if (!_message.getRouterInfo().isHidden())
else
_facade.flood(_message.getRouterInfo());
long floodEnd = System.currentTimeMillis(); long floodEnd = System.currentTimeMillis();
getContext().statManager().addRateData("netDb.storeFloodNew", floodEnd-floodBegin, 0); getContext().statManager().addRateData("netDb.storeFloodNew", floodEnd-floodBegin, 0);
} else { } else {

View File

@ -24,8 +24,8 @@ import java.util.Properties;
import java.util.Set; import java.util.Set;
import java.util.TreeSet; import java.util.TreeSet;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataHelper; import net.i2p.data.DataHelper;
import net.i2p.data.DataStructure;
import net.i2p.data.Destination; import net.i2p.data.Destination;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.Lease; import net.i2p.data.Lease;
@ -235,11 +235,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public void startup() { public void startup() {
_log.info("Starting up the kademlia network database"); _log.info("Starting up the kademlia network database");
RouterInfo ri = _context.router().getRouterInfo(); RouterInfo ri = _context.router().getRouterInfo();
String dbDir = _context.router().getConfigSetting(PROP_DB_DIR); String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR);
if (dbDir == null) {
_log.info("No DB dir specified [" + PROP_DB_DIR + "], using [" + DEFAULT_DB_DIR + "]");
dbDir = DEFAULT_DB_DIR;
}
String enforce = _context.getProperty(PROP_ENFORCE_NETID); String enforce = _context.getProperty(PROP_ENFORCE_NETID);
if (enforce != null) if (enforce != null)
_enforceNetId = Boolean.valueOf(enforce).booleanValue(); _enforceNetId = Boolean.valueOf(enforce).booleanValue();
@ -247,7 +243,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_enforceNetId = DEFAULT_ENFORCE_NETID; _enforceNetId = DEFAULT_ENFORCE_NETID;
_kb = new KBucketSet(_context, ri.getIdentity().getHash()); _kb = new KBucketSet(_context, ri.getIdentity().getHash());
try {
_ds = new PersistentDataStore(_context, dbDir, this); _ds = new PersistentDataStore(_context, dbDir, this);
} catch (IOException ioe) {
throw new RuntimeException("Unable to initialize netdb storage", ioe);
}
//_ds = new TransientDataStore(); //_ds = new TransientDataStore();
// _exploreKeys = new HashSet(64); // _exploreKeys = new HashSet(64);
_dbDir = dbDir; _dbDir = dbDir;
@ -350,21 +350,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
/** get the hashes for all known routers */ /** get the hashes for all known routers */
public Set<Hash> getAllRouters() { public Set<Hash> getAllRouters() {
if (!_initialized) return Collections.EMPTY_SET; if (!_initialized) return Collections.EMPTY_SET;
Set<Hash> keys = _ds.getKeys(); Set<Map.Entry<Hash, DatabaseEntry>> entries = _ds.getMapEntries();
Set<Hash> rv = new HashSet(keys.size()); Set<Hash> rv = new HashSet(entries.size());
if (_log.shouldLog(Log.DEBUG)) for (Map.Entry<Hash, DatabaseEntry> entry : entries) {
_log.debug("getAllRouters(): # keys in the datastore: " + keys.size()); if (entry.getValue().getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
for (Hash key : keys) { rv.add(entry.getKey());
DataStructure ds = _ds.get(key);
if (ds == null) {
if (_log.shouldLog(Log.INFO))
_log.info("Selected hash " + key.toBase64() + " is not stored locally");
} else if ( !(ds instanceof RouterInfo) ) {
// leaseSet
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("getAllRouters(): key is router: " + key.toBase64());
rv.add(key);
} }
} }
return rv; return rv;
@ -383,8 +373,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public int size() { return _count; } public int size() { return _count; }
public void add(Hash entry) { public void add(Hash entry) {
if (_ds == null) return; if (_ds == null) return;
Object o = _ds.get(entry); DatabaseEntry o = _ds.get(entry);
if (o instanceof RouterInfo) if (o != null && o.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
_count++; _count++;
} }
} }
@ -400,12 +390,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public int getKnownLeaseSets() { public int getKnownLeaseSets() {
if (_ds == null) return 0; if (_ds == null) return 0;
//return _ds.countLeaseSets(); //return _ds.countLeaseSets();
Set<Hash> keys = _ds.getKeys();
int rv = 0; int rv = 0;
for (Hash key : keys) { for (DatabaseEntry ds : _ds.getEntries()) {
DataStructure ds = _ds.get(key); if (ds.getType() == DatabaseEntry.KEY_TYPE_LEASESET &&
if (ds != null &&
ds instanceof LeaseSet &&
((LeaseSet)ds).getReceivedAsPublished()) ((LeaseSet)ds).getReceivedAsPublished())
rv++; rv++;
} }
@ -418,8 +405,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public int size() { return _count; } public int size() { return _count; }
public void add(Hash entry) { public void add(Hash entry) {
if (_ds == null) return; if (_ds == null) return;
Object o = _ds.get(entry); DatabaseEntry o = _ds.get(entry);
if (o instanceof LeaseSet) if (o != null && o.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
_count++; _count++;
} }
} }
@ -434,6 +421,32 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return _kb.size(); return _kb.size();
} }
/**
* @return RouterInfo, LeaseSet, or null, validated
* @since 0.8.3
*/
public DatabaseEntry lookupLocally(Hash key) {
if (!_initialized)
return null;
DatabaseEntry rv = _ds.get(key);
if (rv == null)
return null;
if (rv.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
LeaseSet ls = (LeaseSet)rv;
if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR))
return rv;
else
fail(key);
} else if (rv.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
try {
if (validate(key, (RouterInfo)rv) == null)
return rv;
} catch (IllegalArgumentException iae) {}
fail(key);
}
return null;
}
public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) { public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {
if (!_initialized) return; if (!_initialized) return;
LeaseSet ls = lookupLeaseSetLocally(key); LeaseSet ls = lookupLeaseSetLocally(key);
@ -453,9 +466,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public LeaseSet lookupLeaseSetLocally(Hash key) { public LeaseSet lookupLeaseSetLocally(Hash key) {
if (!_initialized) return null; if (!_initialized) return null;
if (_ds.isKnown(key)) { DatabaseEntry ds = _ds.get(key);
DataStructure ds = _ds.get(key); if (ds != null) {
if (ds instanceof LeaseSet) { if (ds.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
LeaseSet ls = (LeaseSet)ds; LeaseSet ls = (LeaseSet)ds;
if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) { if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
return ls; return ls;
@ -489,9 +502,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public RouterInfo lookupRouterInfoLocally(Hash key) { public RouterInfo lookupRouterInfoLocally(Hash key) {
if (!_initialized) return null; if (!_initialized) return null;
DataStructure ds = _ds.get(key); DatabaseEntry ds = _ds.get(key);
if (ds != null) { if (ds != null) {
if (ds instanceof RouterInfo) { if (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
// more aggressive than perhaps is necessary, but makes sure we // more aggressive than perhaps is necessary, but makes sure we
// drop old references that we had accepted on startup (since // drop old references that we had accepted on startup (since
// startup allows some lax rules). // startup allows some lax rules).
@ -610,6 +623,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* Determine whether this leaseSet will be accepted as valid and current * Determine whether this leaseSet will be accepted as valid and current
* given what we know now. * given what we know now.
* *
* TODO this is called several times, only check the key and signature once
*
* @return reason why the entry is not valid, or null if it is valid * @return reason why the entry is not valid, or null if it is valid
*/ */
String validate(Hash key, LeaseSet leaseSet) { String validate(Hash key, LeaseSet leaseSet) {
@ -692,6 +707,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* Determine whether this routerInfo will be accepted as valid and current * Determine whether this routerInfo will be accepted as valid and current
* given what we know now. * given what we know now.
* *
* TODO this is called several times, only check the key and signature once
*/ */
String validate(Hash key, RouterInfo routerInfo) throws IllegalArgumentException { String validate(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
long now = _context.clock().now(); long now = _context.clock().now();
@ -807,30 +823,26 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public void fail(Hash dbEntry) { public void fail(Hash dbEntry) {
if (!_initialized) return; if (!_initialized) return;
boolean isRouterInfo = false; DatabaseEntry o = _ds.get(dbEntry);
Object o = _ds.get(dbEntry); if (o == null) {
if (o instanceof RouterInfo) // if we dont know the key, lets make sure it isn't a now-dead peer
isRouterInfo = true; _kb.remove(dbEntry);
_context.peerManager().removeCapabilities(dbEntry);
return;
}
if (isRouterInfo) { if (o.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
lookupBeforeDropping(dbEntry, (RouterInfo)o); lookupBeforeDropping(dbEntry, (RouterInfo)o);
return; return;
} else { }
// we always drop leaseSets that are failed [timed out], // we always drop leaseSets that are failed [timed out],
// regardless of how many routers we have. this is called on a lease if // regardless of how many routers we have. this is called on a lease if
// it has expired *or* its tunnels are failing and we want to see if there // it has expired *or* its tunnels are failing and we want to see if there
// are any updates // are any updates
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Dropping a lease: " + dbEntry); _log.info("Dropping a lease: " + dbEntry);
} _ds.remove(dbEntry, false);
if (o == null) {
_kb.remove(dbEntry);
_context.peerManager().removeCapabilities(dbEntry);
// if we dont know the key, lets make sure it isn't a now-dead peer
}
_ds.remove(dbEntry, isRouterInfo);
} }
/** don't use directly - see F.N.D.F. override */ /** don't use directly - see F.N.D.F. override */
@ -852,7 +864,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public void unpublish(LeaseSet localLeaseSet) { public void unpublish(LeaseSet localLeaseSet) {
if (!_initialized) return; if (!_initialized) return;
Hash h = localLeaseSet.getDestination().calculateHash(); Hash h = localLeaseSet.getDestination().calculateHash();
DataStructure data = _ds.remove(h); DatabaseEntry data = _ds.remove(h);
if (data == null) { if (data == null) {
if (_log.shouldLog(Log.WARN)) if (_log.shouldLog(Log.WARN))
@ -906,8 +918,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
Set keys = getDataStore().getKeys(); Set keys = getDataStore().getKeys();
for (Iterator iter = keys.iterator(); iter.hasNext(); ) { for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next(); Hash key = (Hash)iter.next();
Object o = getDataStore().get(key); DatabaseEntry o = getDataStore().get(key);
if (o instanceof LeaseSet) if (o.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
leases.add(o); leases.add(o);
} }
return leases; return leases;
@ -920,8 +932,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
Set keys = getDataStore().getKeys(); Set keys = getDataStore().getKeys();
for (Iterator iter = keys.iterator(); iter.hasNext(); ) { for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next(); Hash key = (Hash)iter.next();
Object o = getDataStore().get(key); DatabaseEntry o = getDataStore().get(key);
if (o instanceof RouterInfo) if (o.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
routers.add(o); routers.add(o);
} }
return routers; return routers;
@ -953,7 +965,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
} }
/** unused (overridden in FNDF) */ /** unused (overridden in FNDF) */
public void sendStore(Hash key, DataStructure ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) { public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
if ( (ds == null) || (key == null) ) { if ( (ds == null) || (key == null) ) {
if (onFailure != null) if (onFailure != null)
_context.jobQueue().addJob(onFailure); _context.jobQueue().addJob(onFailure);

View File

@ -18,8 +18,8 @@ import java.util.Map;
import java.util.NoSuchElementException; import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataFormatException; import net.i2p.data.DataFormatException;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo; import net.i2p.data.RouterInfo;
@ -38,19 +38,22 @@ import net.i2p.util.SecureFileOutputStream;
* *
*/ */
class PersistentDataStore extends TransientDataStore { class PersistentDataStore extends TransientDataStore {
private Log _log; private final Log _log;
private String _dbDir; private final File _dbDir;
private KademliaNetworkDatabaseFacade _facade; private final KademliaNetworkDatabaseFacade _facade;
private Writer _writer; private final Writer _writer;
private ReadJob _readJob; private final ReadJob _readJob;
private boolean _initialized; private boolean _initialized;
private final static int READ_DELAY = 60*1000; private final static int READ_DELAY = 60*1000;
public PersistentDataStore(RouterContext ctx, String dbDir, KademliaNetworkDatabaseFacade facade) { /**
* @param dbDir relative path
*/
public PersistentDataStore(RouterContext ctx, String dbDir, KademliaNetworkDatabaseFacade facade) throws IOException {
super(ctx); super(ctx);
_log = ctx.logManager().getLog(PersistentDataStore.class); _log = ctx.logManager().getLog(PersistentDataStore.class);
_dbDir = dbDir; _dbDir = getDbDir(dbDir);
_facade = facade; _facade = facade;
_readJob = new ReadJob(); _readJob = new ReadJob();
_context.jobQueue().addJob(_readJob); _context.jobQueue().addJob(_readJob);
@ -78,7 +81,6 @@ class PersistentDataStore extends TransientDataStore {
@Override @Override
public void restart() { public void restart() {
super.restart(); super.restart();
_dbDir = _facade.getDbDir();
} }
@Override @Override
@ -88,7 +90,7 @@ class PersistentDataStore extends TransientDataStore {
} }
@Override @Override
public DataStructure get(Hash key) { public DatabaseEntry get(Hash key) {
return get(key, true); return get(key, true);
} }
@ -97,8 +99,8 @@ class PersistentDataStore extends TransientDataStore {
* @param persist if false, call super only, don't access disk * @param persist if false, call super only, don't access disk
*/ */
@Override @Override
public DataStructure get(Hash key, boolean persist) { public DatabaseEntry get(Hash key, boolean persist) {
DataStructure rv = super.get(key); DatabaseEntry rv = super.get(key);
/***** /*****
if (rv != null || !persist) if (rv != null || !persist)
return rv; return rv;
@ -113,7 +115,7 @@ class PersistentDataStore extends TransientDataStore {
} }
@Override @Override
public DataStructure remove(Hash key) { public DatabaseEntry remove(Hash key) {
return remove(key, true); return remove(key, true);
} }
@ -121,7 +123,7 @@ class PersistentDataStore extends TransientDataStore {
* @param persist if false, call super only, don't access disk * @param persist if false, call super only, don't access disk
*/ */
@Override @Override
public DataStructure remove(Hash key, boolean persist) { public DatabaseEntry remove(Hash key, boolean persist) {
if (persist) { if (persist) {
_writer.remove(key); _writer.remove(key);
_context.jobQueue().addJob(new RemoveJob(key)); _context.jobQueue().addJob(new RemoveJob(key));
@ -130,7 +132,7 @@ class PersistentDataStore extends TransientDataStore {
} }
@Override @Override
public boolean put(Hash key, DataStructure data) { public boolean put(Hash key, DatabaseEntry data) {
return put(key, data, true); return put(key, data, true);
} }
@ -139,11 +141,11 @@ class PersistentDataStore extends TransientDataStore {
* @return success * @return success
*/ */
@Override @Override
public boolean put(Hash key, DataStructure data, boolean persist) { public boolean put(Hash key, DatabaseEntry data, boolean persist) {
if ( (data == null) || (key == null) ) return false; if ( (data == null) || (key == null) ) return false;
boolean rv = super.put(key, data); boolean rv = super.put(key, data);
// Don't bother writing LeaseSets to disk // Don't bother writing LeaseSets to disk
if (rv && persist && data instanceof RouterInfo) if (rv && persist && data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
_writer.queue(key, data); _writer.queue(key, data);
return rv; return rv;
} }
@ -159,8 +161,7 @@ class PersistentDataStore extends TransientDataStore {
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Removing key " + _key /* , getAddedBy() */); _log.info("Removing key " + _key /* , getAddedBy() */);
try { try {
File dbDir = getDbDir(); removeFile(_key, _dbDir);
removeFile(_key, dbDir);
} catch (IOException ioe) { } catch (IOException ioe) {
_log.error("Error removing key " + _key, ioe); _log.error("Error removing key " + _key, ioe);
} }
@ -179,10 +180,10 @@ class PersistentDataStore extends TransientDataStore {
* We store a reference to the data here too, * We store a reference to the data here too,
* rather than simply pull it from super.get(), because * rather than simply pull it from super.get(), because
* we will soon have to implement a scheme for keeping only * we will soon have to implement a scheme for keeping only
* a subset of all DataStructures in memory and keeping the rest on disk. * a subset of all DatabaseEntrys in memory and keeping the rest on disk.
*/ */
private class Writer implements Runnable { private class Writer implements Runnable {
private final Map<Hash, DataStructure>_keys; private final Map<Hash, DatabaseEntry>_keys;
private final Object _waitLock; private final Object _waitLock;
private volatile boolean _quit; private volatile boolean _quit;
@ -191,7 +192,7 @@ class PersistentDataStore extends TransientDataStore {
_waitLock = new Object(); _waitLock = new Object();
} }
public void queue(Hash key, DataStructure data) { public void queue(Hash key, DatabaseEntry data) {
int pending = _keys.size(); int pending = _keys.size();
boolean exists = (null != _keys.put(key, data)); boolean exists = (null != _keys.put(key, data));
if (exists) if (exists)
@ -200,7 +201,7 @@ class PersistentDataStore extends TransientDataStore {
} }
/** check to see if it's in the write queue */ /** check to see if it's in the write queue */
public DataStructure get(Hash key) { public DatabaseEntry get(Hash key) {
return _keys.get(key); return _keys.get(key);
} }
@ -211,16 +212,16 @@ class PersistentDataStore extends TransientDataStore {
public void run() { public void run() {
_quit = false; _quit = false;
Hash key = null; Hash key = null;
DataStructure data = null; DatabaseEntry data = null;
int count = 0; int count = 0;
int lastCount = 0; int lastCount = 0;
long startTime = 0; long startTime = 0;
while (true) { while (true) {
// get a new iterator every time to get a random entry without // get a new iterator every time to get a random entry without
// having concurrency issues or copying to a List or Array // having concurrency issues or copying to a List or Array
Iterator<Map.Entry<Hash, DataStructure>> iter = _keys.entrySet().iterator(); Iterator<Map.Entry<Hash, DatabaseEntry>> iter = _keys.entrySet().iterator();
try { try {
Map.Entry<Hash, DataStructure> entry = iter.next(); Map.Entry<Hash, DatabaseEntry> entry = iter.next();
key = entry.getKey(); key = entry.getKey();
data = entry.getValue(); data = entry.getValue();
iter.remove(); iter.remove();
@ -235,7 +236,10 @@ class PersistentDataStore extends TransientDataStore {
if (key != null) { if (key != null) {
if (data != null) { if (data != null) {
// synch with the reader job
synchronized (_dbDir) {
write(key, data); write(key, data);
}
data = null; data = null;
} }
key = null; key = null;
@ -270,23 +274,22 @@ class PersistentDataStore extends TransientDataStore {
} }
} }
private void write(Hash key, DataStructure data) { private void write(Hash key, DatabaseEntry data) {
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Writing key " + key); _log.info("Writing key " + key);
FileOutputStream fos = null; FileOutputStream fos = null;
File dbFile = null; File dbFile = null;
try { try {
String filename = null; String filename = null;
File dbDir = getDbDir();
if (data instanceof LeaseSet) if (data.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
filename = getLeaseSetName(key); filename = getLeaseSetName(key);
else if (data instanceof RouterInfo) else if (data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
filename = getRouterInfoName(key); filename = getRouterInfoName(key);
else else
throw new IOException("We don't know how to write objects of type " + data.getClass().getName()); throw new IOException("We don't know how to write objects of type " + data.getClass().getName());
dbFile = new File(dbDir, filename); dbFile = new File(_dbDir, filename);
long dataPublishDate = getPublishDate(data); long dataPublishDate = getPublishDate(data);
if (dbFile.lastModified() < dataPublishDate) { if (dbFile.lastModified() < dataPublishDate) {
// our filesystem is out of date, lets replace it // our filesystem is out of date, lets replace it
@ -312,27 +315,33 @@ class PersistentDataStore extends TransientDataStore {
if (fos != null) try { fos.close(); } catch (IOException ioe) {} if (fos != null) try { fos.close(); } catch (IOException ioe) {}
} }
} }
private long getPublishDate(DataStructure data) { private long getPublishDate(DatabaseEntry data) {
if (data instanceof RouterInfo) { return data.getDate();
return ((RouterInfo)data).getPublished();
} else if (data instanceof LeaseSet) {
return ((LeaseSet)data).getEarliestLeaseDate();
} else {
return -1;
}
} }
/** This is only for manual reseeding? Why bother every 60 sec??? */ /** This is only for manual reseeding? Why bother every 60 sec??? */
private class ReadJob extends JobImpl { private class ReadJob extends JobImpl {
private boolean _alreadyWarned; private boolean _alreadyWarned;
private long _lastModified;
public ReadJob() { public ReadJob() {
super(PersistentDataStore.this._context); super(PersistentDataStore.this._context);
_alreadyWarned = false; _alreadyWarned = false;
} }
public String getName() { return "DB Read Job"; } public String getName() { return "DB Read Job"; }
public void runJob() { public void runJob() {
// check directory mod time to save a lot of object churn in scanning all the file names
long lastMod = _dbDir.lastModified();
if (lastMod > _lastModified) {
_lastModified = lastMod;
_log.info("Rereading new files"); _log.info("Rereading new files");
// synch with the writer job
synchronized (_dbDir) {
readFiles(); readFiles();
}
}
requeue(READ_DELAY); requeue(READ_DELAY);
} }
@ -342,9 +351,8 @@ class PersistentDataStore extends TransientDataStore {
private void readFiles() { private void readFiles() {
int routerCount = 0; int routerCount = 0;
try {
File dbDir = getDbDir(); File routerInfoFiles[] = _dbDir.listFiles(RouterInfoFilter.getInstance());
File routerInfoFiles[] = dbDir.listFiles(RouterInfoFilter.getInstance());
if (routerInfoFiles != null) { if (routerInfoFiles != null) {
routerCount += routerInfoFiles.length; routerCount += routerInfoFiles.length;
if (routerInfoFiles.length > 5) if (routerInfoFiles.length > 5)
@ -359,9 +367,6 @@ class PersistentDataStore extends TransientDataStore {
} }
} }
} }
} catch (IOException ioe) {
_log.error("Error reading files in the db dir", ioe);
}
if (!_alreadyWarned) { if (!_alreadyWarned) {
ReseedChecker.checkReseed(_context, routerCount); ReseedChecker.checkReseed(_context, routerCount);
@ -383,9 +388,9 @@ class PersistentDataStore extends TransientDataStore {
private boolean shouldRead() { private boolean shouldRead() {
// persist = false to call only super.get() // persist = false to call only super.get()
DataStructure data = get(_key, false); DatabaseEntry data = get(_key, false);
if (data == null) return true; if (data == null) return true;
if (data instanceof RouterInfo) { if (data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
long knownDate = ((RouterInfo)data).getPublished(); long knownDate = ((RouterInfo)data).getPublished();
long fileDate = _routerFile.lastModified(); long fileDate = _routerFile.lastModified();
if (fileDate > knownDate) if (fileDate > knownDate)
@ -441,8 +446,8 @@ class PersistentDataStore extends TransientDataStore {
} }
private File getDbDir() throws IOException { private File getDbDir(String dbDir) throws IOException {
File f = new SecureDirectory(_context.getRouterDir(), _dbDir); File f = new SecureDirectory(_context.getRouterDir(), dbDir);
if (!f.exists()) { if (!f.exists()) {
boolean created = f.mkdirs(); boolean created = f.mkdirs();
if (!created) if (!created)

View File

@ -14,8 +14,8 @@ import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataHelper; import net.i2p.data.DataHelper;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo; import net.i2p.data.RouterInfo;
@ -293,12 +293,12 @@ class SearchJob extends JobImpl {
attempted.addAll(closestHashes); attempted.addAll(closestHashes);
for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) { for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next(); Hash peer = (Hash)iter.next();
DataStructure ds = _facade.getDataStore().get(peer); DatabaseEntry ds = _facade.getDataStore().get(peer);
if (ds == null) { if (ds == null) {
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Next closest peer " + peer + " was only recently referred to us, sending a search for them"); _log.info("Next closest peer " + peer + " was only recently referred to us, sending a search for them");
getContext().netDb().lookupRouterInfo(peer, null, null, _timeoutMs); getContext().netDb().lookupRouterInfo(peer, null, null, _timeoutMs);
} else if (!(ds instanceof RouterInfo)) { } else if (!(ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
if (_log.shouldLog(Log.WARN)) if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": Error selecting closest hash that wasnt a router! " _log.warn(getJobId() + ": Error selecting closest hash that wasnt a router! "
+ peer + " : " + ds.getClass().getName()); + peer + " : " + ds.getClass().getName());
@ -635,7 +635,7 @@ class SearchJob extends JobImpl {
* *
*/ */
private void resend() { private void resend() {
DataStructure ds = _facade.lookupLeaseSetLocally(_state.getTarget()); DatabaseEntry ds = _facade.lookupLeaseSetLocally(_state.getTarget());
if (ds == null) { if (ds == null) {
if (SHOULD_RESEND_ROUTERINFO) { if (SHOULD_RESEND_ROUTERINFO) {
ds = _facade.lookupRouterInfoLocally(_state.getTarget()); ds = _facade.lookupRouterInfoLocally(_state.getTarget());
@ -665,8 +665,7 @@ class SearchJob extends JobImpl {
*/ */
private boolean resend(RouterInfo toPeer, LeaseSet ls) { private boolean resend(RouterInfo toPeer, LeaseSet ls) {
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext()); DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(ls.getDestination().calculateHash()); msg.setEntry(ls);
msg.setLeaseSet(ls);
msg.setMessageExpiration(getContext().clock().now() + RESEND_TIMEOUT); msg.setMessageExpiration(getContext().clock().now() + RESEND_TIMEOUT);
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel(); TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();

View File

@ -2,7 +2,9 @@ package net.i2p.router.networkdb.kademlia;
import java.util.Date; import java.util.Date;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo; import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseSearchReplyMessage; import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage; import net.i2p.data.i2np.DatabaseStoreMessage;
@ -78,22 +80,23 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
long timeToReply = _state.dataFound(_peer); long timeToReply = _state.dataFound(_peer);
DatabaseStoreMessage msg = (DatabaseStoreMessage)message; DatabaseStoreMessage msg = (DatabaseStoreMessage)message;
if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { DatabaseEntry entry = msg.getEntry();
if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
try { try {
_facade.store(msg.getKey(), msg.getLeaseSet()); _facade.store(msg.getKey(), (LeaseSet) entry);
getContext().profileManager().dbLookupSuccessful(_peer, timeToReply); getContext().profileManager().dbLookupSuccessful(_peer, timeToReply);
} catch (IllegalArgumentException iae) { } catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.ERROR)) if (_log.shouldLog(Log.ERROR))
_log.warn("Peer " + _peer + " sent us an invalid leaseSet: " + iae.getMessage()); _log.warn("Peer " + _peer + " sent us an invalid leaseSet: " + iae.getMessage());
getContext().profileManager().dbLookupReply(_peer, 0, 0, 1, 0, timeToReply); getContext().profileManager().dbLookupReply(_peer, 0, 0, 1, 0, timeToReply);
} }
} else if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) { } else if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": dbStore received on search containing router " _log.info(getJobId() + ": dbStore received on search containing router "
+ msg.getKey() + " with publishDate of " + msg.getKey() + " with publishDate of "
+ new Date(msg.getRouterInfo().getPublished())); + new Date(entry.getDate()));
try { try {
_facade.store(msg.getKey(), msg.getRouterInfo()); _facade.store(msg.getKey(), (RouterInfo) entry);
getContext().profileManager().dbLookupSuccessful(_peer, timeToReply); getContext().profileManager().dbLookupSuccessful(_peer, timeToReply);
} catch (IllegalArgumentException iae) { } catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.ERROR)) if (_log.shouldLog(Log.ERROR))
@ -102,7 +105,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
} }
} else { } else {
if (_log.shouldLog(Log.ERROR)) if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": Unknown db store type?!@ " + msg.getValueType()); _log.error(getJobId() + ": Unknown db store type?!@ " + entry.getType());
} }
} else if (message instanceof DatabaseSearchReplyMessage) { } else if (message instanceof DatabaseSearchReplyMessage) {
_job.replyFound((DatabaseSearchReplyMessage)message, _peer); _job.replyFound((DatabaseSearchReplyMessage)message, _peer);

View File

@ -13,7 +13,7 @@ import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
import net.i2p.data.DataStructure; import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo; import net.i2p.data.RouterInfo;
@ -61,7 +61,7 @@ class StoreJob extends JobImpl {
* *
*/ */
public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key, public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key,
DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) { DatabaseEntry data, Job onSuccess, Job onFailure, long timeoutMs) {
this(context, facade, key, data, onSuccess, onFailure, timeoutMs, null); this(context, facade, key, data, onSuccess, onFailure, timeoutMs, null);
} }
@ -70,7 +70,7 @@ class StoreJob extends JobImpl {
* already know they have it). This can be null. * already know they have it). This can be null.
*/ */
public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key, public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key,
DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) { DatabaseEntry data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
super(context); super(context);
_log = context.logManager().getLog(StoreJob.class); _log = context.logManager().getLog(StoreJob.class);
_facade = facade; _facade = facade;
@ -167,8 +167,8 @@ class StoreJob extends JobImpl {
_log.info(getJobId() + ": Continue sending key " + _state.getTarget() + " after " + _state.getAttempted().size() + " tries to " + closestHashes); _log.info(getJobId() + ": Continue sending key " + _state.getTarget() + " after " + _state.getAttempted().size() + " tries to " + closestHashes);
for (Iterator<Hash> iter = closestHashes.iterator(); iter.hasNext(); ) { for (Iterator<Hash> iter = closestHashes.iterator(); iter.hasNext(); ) {
Hash peer = iter.next(); Hash peer = iter.next();
DataStructure ds = _facade.getDataStore().get(peer); DatabaseEntry ds = _facade.getDataStore().get(peer);
if ( (ds == null) || !(ds instanceof RouterInfo) ) { if ( (ds == null) || !(ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) ) {
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Error selecting closest hash that wasnt a router! " + peer + " : " + ds); _log.info(getJobId() + ": Error selecting closest hash that wasnt a router! " + peer + " : " + ds);
_state.addSkipped(peer); _state.addSkipped(peer);
@ -255,16 +255,19 @@ class StoreJob extends JobImpl {
* *
*/ */
private void sendStore(RouterInfo router, int responseTime) { private void sendStore(RouterInfo router, int responseTime) {
if (!_state.getTarget().equals(_state.getData().getHash())) {
_log.error("Hash mismatch StoreJob");
return;
}
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext()); DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(_state.getTarget()); if (_state.getData().getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
if (_state.getData() instanceof RouterInfo) {
msg.setRouterInfo((RouterInfo)_state.getData());
if (responseTime > MAX_DIRECT_EXPIRATION) if (responseTime > MAX_DIRECT_EXPIRATION)
responseTime = MAX_DIRECT_EXPIRATION; responseTime = MAX_DIRECT_EXPIRATION;
} else if (_state.getData() instanceof LeaseSet) } else if (_state.getData().getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
msg.setLeaseSet((LeaseSet)_state.getData()); } else {
else
throw new IllegalArgumentException("Storing an unknown data type! " + _state.getData()); throw new IllegalArgumentException("Storing an unknown data type! " + _state.getData());
}
msg.setEntry(_state.getData());
msg.setMessageExpiration(getContext().clock().now() + _timeoutMs); msg.setMessageExpiration(getContext().clock().now() + _timeoutMs);
if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) { if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) {
@ -286,7 +289,7 @@ class StoreJob extends JobImpl {
* *
*/ */
private void sendStore(DatabaseStoreMessage msg, RouterInfo peer, long expiration) { private void sendStore(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { if (msg.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
getContext().statManager().addRateData("netDb.storeLeaseSetSent", 1, 0); getContext().statManager().addRateData("netDb.storeLeaseSetSent", 1, 0);
// if it is an encrypted leaseset... // if it is an encrypted leaseset...
if (getContext().keyRing().get(msg.getKey()) != null) if (getContext().keyRing().get(msg.getKey()) != null)

View File

@ -9,7 +9,7 @@ import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.DataStructure; import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.router.RouterContext; import net.i2p.router.RouterContext;
@ -19,7 +19,7 @@ import net.i2p.router.RouterContext;
class StoreState { class StoreState {
private RouterContext _context; private RouterContext _context;
private Hash _key; private Hash _key;
private DataStructure _data; private DatabaseEntry _data;
private final HashSet<Hash> _pendingPeers; private final HashSet<Hash> _pendingPeers;
private Map<Hash, Long> _pendingPeerTimes; private Map<Hash, Long> _pendingPeerTimes;
private Map<Hash, MessageWrapper.WrappedMessage> _pendingMessages; private Map<Hash, MessageWrapper.WrappedMessage> _pendingMessages;
@ -31,10 +31,10 @@ class StoreState {
private volatile long _completed; private volatile long _completed;
private volatile long _started; private volatile long _started;
public StoreState(RouterContext ctx, Hash key, DataStructure data) { public StoreState(RouterContext ctx, Hash key, DatabaseEntry data) {
this(ctx, key, data, null); this(ctx, key, data, null);
} }
public StoreState(RouterContext ctx, Hash key, DataStructure data, Set<Hash> toSkip) { public StoreState(RouterContext ctx, Hash key, DatabaseEntry data, Set<Hash> toSkip) {
_context = ctx; _context = ctx;
_key = key; _key = key;
_data = data; _data = data;
@ -54,7 +54,7 @@ class StoreState {
} }
public Hash getTarget() { return _key; } public Hash getTarget() { return _key; }
public DataStructure getData() { return _data; } public DatabaseEntry getData() { return _data; }
public Set<Hash> getPending() { public Set<Hash> getPending() {
synchronized (_pendingPeers) { synchronized (_pendingPeers) {
return (Set<Hash>)_pendingPeers.clone(); return (Set<Hash>)_pendingPeers.clone();

View File

@ -8,14 +8,15 @@ package net.i2p.router.networkdb.kademlia;
* *
*/ */
import java.util.Collection;
import java.util.Date; import java.util.Date;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataHelper; import net.i2p.data.DataHelper;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo; import net.i2p.data.RouterInfo;
@ -24,7 +25,7 @@ import net.i2p.util.Log;
class TransientDataStore implements DataStore { class TransientDataStore implements DataStore {
private Log _log; private Log _log;
private ConcurrentHashMap<Hash, DataStructure> _data; private ConcurrentHashMap<Hash, DatabaseEntry> _data;
protected RouterContext _context; protected RouterContext _context;
public TransientDataStore(RouterContext ctx) { public TransientDataStore(RouterContext ctx) {
@ -51,12 +52,28 @@ class TransientDataStore implements DataStore {
return new HashSet(_data.keySet()); return new HashSet(_data.keySet());
} }
/**
* @return not a copy
* @since 0.8.3
*/
public Collection<DatabaseEntry> getEntries() {
return _data.values();
}
/**
* @return not a copy
* @since 0.8.3
*/
public Set<Map.Entry<Hash, DatabaseEntry>> getMapEntries() {
return _data.entrySet();
}
/** for PersistentDataStore only - don't use here @throws IAE always */ /** for PersistentDataStore only - don't use here @throws IAE always */
public DataStructure get(Hash key, boolean persist) { public DatabaseEntry get(Hash key, boolean persist) {
throw new IllegalArgumentException("no"); throw new IllegalArgumentException("no");
} }
public DataStructure get(Hash key) { public DatabaseEntry get(Hash key) {
return _data.get(key); return _data.get(key);
} }
@ -66,15 +83,15 @@ class TransientDataStore implements DataStore {
public int countLeaseSets() { public int countLeaseSets() {
int count = 0; int count = 0;
for (DataStructure d : _data.values()) { for (DatabaseEntry d : _data.values()) {
if (d instanceof LeaseSet) if (d.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
count++; count++;
} }
return count; return count;
} }
/** for PersistentDataStore only - don't use here @throws IAE always */ /** for PersistentDataStore only - don't use here @throws IAE always */
public boolean put(Hash key, DataStructure data, boolean persist) { public boolean put(Hash key, DatabaseEntry data, boolean persist) {
throw new IllegalArgumentException("no"); throw new IllegalArgumentException("no");
} }
@ -82,14 +99,14 @@ class TransientDataStore implements DataStore {
* @param data must be validated before here * @param data must be validated before here
* @return success * @return success
*/ */
public boolean put(Hash key, DataStructure data) { public boolean put(Hash key, DatabaseEntry data) {
if (data == null) return false; if (data == null) return false;
if (_log.shouldLog(Log.DEBUG)) if (_log.shouldLog(Log.DEBUG))
_log.debug("Storing key " + key); _log.debug("Storing key " + key);
DataStructure old = null; DatabaseEntry old = null;
old = _data.putIfAbsent(key, data); old = _data.putIfAbsent(key, data);
boolean rv = false; boolean rv = false;
if (data instanceof RouterInfo) { if (data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
// Don't do this here so we don't reset it at router startup; // Don't do this here so we don't reset it at router startup;
// the StoreMessageJob calls this // the StoreMessageJob calls this
//_context.profileManager().heardAbout(key); //_context.profileManager().heardAbout(key);
@ -113,7 +130,7 @@ class TransientDataStore implements DataStore {
_log.info("New router for " + key + ": published on " + new Date(ri.getPublished())); _log.info("New router for " + key + ": published on " + new Date(ri.getPublished()));
rv = true; rv = true;
} }
} else if (data instanceof LeaseSet) { } else if (data.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
LeaseSet ls = (LeaseSet)data; LeaseSet ls = (LeaseSet)data;
if (old != null) { if (old != null) {
LeaseSet ols = (LeaseSet)old; LeaseSet ols = (LeaseSet)old;
@ -158,9 +175,9 @@ class TransientDataStore implements DataStore {
public String toString() { public String toString() {
StringBuilder buf = new StringBuilder(); StringBuilder buf = new StringBuilder();
buf.append("Transient DataStore: ").append(_data.size()).append("\nKeys: "); buf.append("Transient DataStore: ").append(_data.size()).append("\nKeys: ");
for (Map.Entry<Hash, DataStructure> e : _data.entrySet()) { for (Map.Entry<Hash, DatabaseEntry> e : _data.entrySet()) {
Hash key = e.getKey(); Hash key = e.getKey();
DataStructure dp = e.getValue(); DatabaseEntry dp = e.getValue();
buf.append("\n\t*Key: ").append(key.toString()).append("\n\tContent: ").append(dp.toString()); buf.append("\n\t*Key: ").append(key.toString()).append("\n\tContent: ").append(dp.toString());
} }
buf.append("\n"); buf.append("\n");
@ -168,11 +185,11 @@ class TransientDataStore implements DataStore {
} }
/** for PersistentDataStore only - don't use here */ /** for PersistentDataStore only - don't use here */
public DataStructure remove(Hash key, boolean persist) { public DatabaseEntry remove(Hash key, boolean persist) {
throw new IllegalArgumentException("no"); throw new IllegalArgumentException("no");
} }
public DataStructure remove(Hash key) { public DatabaseEntry remove(Hash key) {
if (_log.shouldLog(Log.DEBUG)) if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing key " + key.toBase64()); _log.debug("Removing key " + key.toBase64());
return _data.remove(key); return _data.remove(key);

View File

@ -178,8 +178,7 @@ public class PeerTestJob extends JobImpl {
*/ */
private DatabaseStoreMessage buildMessage(RouterInfo peer, TunnelId replyTunnel, Hash replyGateway, long nonce, long expiration) { private DatabaseStoreMessage buildMessage(RouterInfo peer, TunnelId replyTunnel, Hash replyGateway, long nonce, long expiration) {
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext()); DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(peer.getIdentity().getHash()); msg.setEntry(peer);
msg.setRouterInfo(peer);
msg.setReplyGateway(replyGateway); msg.setReplyGateway(replyGateway);
msg.setReplyTunnel(replyTunnel); msg.setReplyTunnel(replyTunnel);
msg.setReplyToken(nonce); msg.setReplyToken(nonce);

View File

@ -344,8 +344,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
OutNetMessage infoMsg = new OutNetMessage(_context); OutNetMessage infoMsg = new OutNetMessage(_context);
infoMsg.setExpiration(_context.clock().now()+10*1000); infoMsg.setExpiration(_context.clock().now()+10*1000);
DatabaseStoreMessage dsm = new DatabaseStoreMessage(_context); DatabaseStoreMessage dsm = new DatabaseStoreMessage(_context);
dsm.setKey(_context.routerHash()); dsm.setEntry(_context.router().getRouterInfo());
dsm.setRouterInfo(_context.router().getRouterInfo());
infoMsg.setMessage(dsm); infoMsg.setMessage(dsm);
infoMsg.setPriority(100); infoMsg.setPriority(100);
RouterInfo target = _context.netDb().lookupRouterInfoLocally(_remotePeer.calculateHash()); RouterInfo target = _context.netDb().lookupRouterInfoLocally(_remotePeer.calculateHash());

View File

@ -577,8 +577,7 @@ class EstablishmentManager {
(isInbound ? " inbound con from " + peer : "outbound con to " + peer)); (isInbound ? " inbound con from " + peer : "outbound con to " + peer));
DatabaseStoreMessage m = new DatabaseStoreMessage(_context); DatabaseStoreMessage m = new DatabaseStoreMessage(_context);
m.setKey(_context.routerHash()); m.setEntry(_context.router().getRouterInfo());
m.setRouterInfo(_context.router().getRouterInfo());
m.setMessageExpiration(_context.clock().now() + 10*1000); m.setMessageExpiration(_context.clock().now() + 10*1000);
_transport.send(m, peer); _transport.send(m, peer);
} }

View File

@ -18,6 +18,7 @@ import java.util.TreeSet;
import java.util.Vector; import java.util.Vector;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataHelper; import net.i2p.data.DataHelper;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.RouterAddress; import net.i2p.data.RouterAddress;
@ -769,8 +770,11 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
public void messageReceived(I2NPMessage inMsg, RouterIdentity remoteIdent, Hash remoteIdentHash, long msToReceive, int bytesReceived) { public void messageReceived(I2NPMessage inMsg, RouterIdentity remoteIdent, Hash remoteIdentHash, long msToReceive, int bytesReceived) {
if (inMsg.getType() == DatabaseStoreMessage.MESSAGE_TYPE) { if (inMsg.getType() == DatabaseStoreMessage.MESSAGE_TYPE) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)inMsg; DatabaseStoreMessage dsm = (DatabaseStoreMessage)inMsg;
if ( (dsm.getRouterInfo() != null) && DatabaseEntry entry = dsm.getEntry();
(dsm.getRouterInfo().getNetworkId() != Router.NETWORK_ID) ) { if (entry == null)
return;
if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
((RouterInfo) entry).getNetworkId() != Router.NETWORK_ID) {
// this is pre-0.6.1.10, so it isn't going to happen any more // this is pre-0.6.1.10, so it isn't going to happen any more
/* /*
@ -788,7 +792,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
+ " because they are in the wrong net"); + " because they are in the wrong net");
} }
*/ */
Hash peerHash = dsm.getRouterInfo().getIdentity().calculateHash(); Hash peerHash = entry.getHash();
PeerState peer = getPeerState(peerHash); PeerState peer = getPeerState(peerHash);
if (peer != null) { if (peer != null) {
RemoteHostId remote = peer.getRemoteHostId(); RemoteHostId remote = peer.getRemoteHostId();
@ -797,14 +801,14 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
SimpleScheduler.getInstance().addEvent(new RemoveDropList(remote), DROPLIST_PERIOD); SimpleScheduler.getInstance().addEvent(new RemoveDropList(remote), DROPLIST_PERIOD);
} }
markUnreachable(peerHash); markUnreachable(peerHash);
_context.shitlist().shitlistRouter(peerHash, "Part of the wrong network, version = " + dsm.getRouterInfo().getOption("router.version")); _context.shitlist().shitlistRouter(peerHash, "Part of the wrong network, version = " + ((RouterInfo) entry).getOption("router.version"));
//_context.shitlist().shitlistRouter(peerHash, "Part of the wrong network", STYLE); //_context.shitlist().shitlistRouter(peerHash, "Part of the wrong network", STYLE);
dropPeer(peerHash, false, "wrong network"); dropPeer(peerHash, false, "wrong network");
if (_log.shouldLog(Log.WARN)) if (_log.shouldLog(Log.WARN))
_log.warn("Dropping the peer " + peerHash.toBase64() + " because they are in the wrong net: " + dsm.getRouterInfo()); _log.warn("Dropping the peer " + peerHash.toBase64() + " because they are in the wrong net: " + entry);
return; return;
} else { } else {
if (dsm.getRouterInfo() != null) { if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Received an RI from the same net"); _log.info("Received an RI from the same net");
} else { } else {

View File

@ -1,8 +1,10 @@
package net.i2p.router.tunnel; package net.i2p.router.tunnel;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.LeaseSet; import net.i2p.data.LeaseSet;
import net.i2p.data.Payload; import net.i2p.data.Payload;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId; import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DataMessage; import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage; import net.i2p.data.i2np.DatabaseSearchReplyMessage;
@ -71,7 +73,7 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
msg = newMsg; msg = newMsg;
} else if ( (_client != null) && } else if ( (_client != null) &&
(msg.getType() == DatabaseStoreMessage.MESSAGE_TYPE) && (msg.getType() == DatabaseStoreMessage.MESSAGE_TYPE) &&
(((DatabaseStoreMessage)msg).getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO)) { (((DatabaseStoreMessage)msg).getEntry().getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
// FVSJ may result in an unsolicited RI store if the peer went non-ff. // FVSJ may result in an unsolicited RI store if the peer went non-ff.
// Maybe we can figure out a way to handle this safely, so we don't ask him again. // Maybe we can figure out a way to handle this safely, so we don't ask him again.
// For now, just hope we eventually find out through other means. // For now, just hope we eventually find out through other means.
@ -165,7 +167,7 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
// unnecessarily // unnecessarily
DatabaseStoreMessage dsm = (DatabaseStoreMessage)data; DatabaseStoreMessage dsm = (DatabaseStoreMessage)data;
try { try {
if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) { if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
// If it was stored to us before, don't undo the // If it was stored to us before, don't undo the
// receivedAsPublished flag so we will continue to respond to requests // receivedAsPublished flag so we will continue to respond to requests
// for the leaseset. That is, we don't want this to change the // for the leaseset. That is, we don't want this to change the
@ -173,10 +175,11 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
// When the keyspace rotates at midnight, and this leaseset moves out // When the keyspace rotates at midnight, and this leaseset moves out
// of our keyspace, maybe we shouldn't do this? // of our keyspace, maybe we shouldn't do this?
// Should we do this whether ff or not? // Should we do this whether ff or not?
LeaseSet old = _context.netDb().store(dsm.getKey(), dsm.getLeaseSet()); LeaseSet ls = (LeaseSet) dsm.getEntry();
LeaseSet old = _context.netDb().store(dsm.getKey(), ls);
if (old != null && old.getReceivedAsPublished() if (old != null && old.getReceivedAsPublished()
/** && ((FloodfillNetworkDatabaseFacade)_context.netDb()).floodfillEnabled() **/ ) /** && ((FloodfillNetworkDatabaseFacade)_context.netDb()).floodfillEnabled() **/ )
dsm.getLeaseSet().setReceivedAsPublished(true); ls.setReceivedAsPublished(true);
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Storing LS for: " + dsm.getKey() + " sent to: " + _client); _log.info("Storing LS for: " + dsm.getKey() + " sent to: " + _client);
} else { } else {
@ -189,7 +192,7 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
_log.error("Dropped dangerous message down a tunnel for " + _client.toBase64() + ": " + dsm, new Exception("cause")); _log.error("Dropped dangerous message down a tunnel for " + _client.toBase64() + ": " + dsm, new Exception("cause"));
return; return;
} }
_context.netDb().store(dsm.getKey(), dsm.getRouterInfo()); _context.netDb().store(dsm.getKey(), (RouterInfo) dsm.getEntry());
} }
} catch (IllegalArgumentException iae) { } catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.WARN)) if (_log.shouldLog(Log.WARN))