* NetDB, DatabaseStoreMessage:

      - Convert everything from DataStructure to the new DatabaseEntry superclass
      - Optimizations made possible by DatabaseEntry
      - Don't rescan netDb directory unless changed
zzz
2011-01-02 14:23:26 +00:00
parent 0eebfbacd7
commit 378490886f
27 changed files with 355 additions and 348 deletions
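
For context, the DatabaseEntry class itself is not part of this diff; below is a minimal sketch of the API surface the converted code relies on (the type constants, getType(), getHash(), getDate()). The parent class and exact signatures are assumptions inferred from how this commit uses the class, not copied from net.i2p.data.

package net.i2p.data;

/**
 * Sketch of the common superclass of RouterInfo and LeaseSet as used by this
 * commit: getType() distinguishes the two concrete kinds, getHash() returns
 * the netDb key, and getDate() returns the publish date (RouterInfo) or the
 * earliest lease expiration (LeaseSet) used for freshness comparisons.
 */
public abstract class DatabaseEntry extends DataStructureImpl {
    public final static int KEY_TYPE_ROUTERINFO = 0;
    public final static int KEY_TYPE_LEASESET = 1;

    /** KEY_TYPE_ROUTERINFO or KEY_TYPE_LEASESET */
    public abstract int getType();

    /** the key this entry is stored under in the network database */
    public abstract Hash getHash();

    /** timestamp used to decide which of two entries is newer */
    public abstract long getDate();
}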

View File

@ -12,6 +12,7 @@ import java.io.ByteArrayInputStream;
import java.io.IOException;
import net.i2p.I2PAppContext;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
@ -28,59 +29,39 @@ import net.i2p.data.TunnelId;
public class DatabaseStoreMessage extends I2NPMessageImpl {
public final static int MESSAGE_TYPE = 1;
private Hash _key;
private int _type;
private LeaseSet _leaseSet;
private RouterInfo _info;
private byte[] _leaseSetCache;
private byte[] _routerInfoCache;
private DatabaseEntry _dbEntry;
private byte[] _byteCache;
private long _replyToken;
private TunnelId _replyTunnel;
private Hash _replyGateway;
public final static int KEY_TYPE_ROUTERINFO = 0;
public final static int KEY_TYPE_LEASESET = 1;
public DatabaseStoreMessage(I2PAppContext context) {
super(context);
setValueType(-1);
}
/**
* Defines the key in the network database being stored
*
*/
public Hash getKey() { return _key; }
public void setKey(Hash key) { _key = key; }
/**
* Defines the router info value in the network database being stored
*
*/
public RouterInfo getRouterInfo() { return _info; }
public void setRouterInfo(RouterInfo routerInfo) {
_info = routerInfo;
if (_info != null)
setValueType(KEY_TYPE_ROUTERINFO);
public Hash getKey() {
if (_key != null)
return _key; // receive
if (_dbEntry != null)
return _dbEntry.getHash(); // create
return null;
}
/**
* Defines the lease set value in the network database being stored
*
* Defines the entry in the network database being stored
*/
public LeaseSet getLeaseSet() { return _leaseSet; }
public void setLeaseSet(LeaseSet leaseSet) {
_leaseSet = leaseSet;
if (_leaseSet != null)
setValueType(KEY_TYPE_LEASESET);
}
public DatabaseEntry getEntry() { return _dbEntry; }
/**
* Defines type of key being stored in the network database -
* either KEY_TYPE_ROUTERINFO or KEY_TYPE_LEASESET
*
* This also sets the key
*/
public int getValueType() { return _type; }
public void setValueType(int type) { _type = type; }
public void setEntry(DatabaseEntry entry) {
_dbEntry = entry;
}
/**
* If a reply is desired, this token specifies the message ID that should
@ -90,6 +71,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
* @return positive reply token ID, or 0 if no reply is necessary.
*/
public long getReplyToken() { return _replyToken; }
/**
* Update the reply token.
*
@ -113,13 +95,10 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
int curIndex = offset;
//byte keyData[] = new byte[Hash.HASH_LENGTH];
//System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
_key = Hash.create(data, curIndex);
curIndex += Hash.HASH_LENGTH;
//_key = new Hash(keyData);
_type = (int)DataHelper.fromLong(data, curIndex, 1);
type = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++;
_replyToken = DataHelper.fromLong(data, curIndex, 4);
@ -131,39 +110,38 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
_replyTunnel = new TunnelId(tunnel);
curIndex += 4;
//byte gw[] = new byte[Hash.HASH_LENGTH];
//System.arraycopy(data, curIndex, gw, 0, Hash.HASH_LENGTH);
_replyGateway = Hash.create(data, curIndex);
curIndex += Hash.HASH_LENGTH;
//_replyGateway = new Hash(gw);
} else {
_replyTunnel = null;
_replyGateway = null;
}
if (_type == KEY_TYPE_LEASESET) {
_leaseSet = new LeaseSet();
if (type == DatabaseEntry.KEY_TYPE_LEASESET) {
_dbEntry = new LeaseSet();
try {
_leaseSet.readBytes(new ByteArrayInputStream(data, curIndex, data.length-curIndex));
_dbEntry.readBytes(new ByteArrayInputStream(data, curIndex, data.length-curIndex));
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the leaseSet", dfe);
}
} else if (_type == KEY_TYPE_ROUTERINFO) {
_info = new RouterInfo();
} else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
_dbEntry = new RouterInfo();
int compressedSize = (int)DataHelper.fromLong(data, curIndex, 2);
curIndex += 2;
try {
byte decompressed[] = DataHelper.decompress(data, curIndex, compressedSize);
_info.readBytes(new ByteArrayInputStream(decompressed));
_dbEntry.readBytes(new ByteArrayInputStream(decompressed));
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the routerInfo", dfe);
} catch (IOException ioe) {
throw new I2NPMessageException("Compressed routerInfo was corrupt", ioe);
}
} else {
throw new I2NPMessageException("Invalid type of key read from the structure - " + _type);
throw new I2NPMessageException("Invalid type of key read from the structure - " + type);
}
//if (!key.equals(_dbEntry.getHash()))
// throw new I2NPMessageException("Hash mismatch in DSM");
}
@ -172,28 +150,28 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
int len = Hash.HASH_LENGTH + 1 + 4; // key+type+replyToken
if (_replyToken > 0)
len += 4 + Hash.HASH_LENGTH; // replyTunnel+replyGateway
if (_type == KEY_TYPE_LEASESET) {
_leaseSetCache = _leaseSet.toByteArray();
len += _leaseSetCache.length;
} else if (_type == KEY_TYPE_ROUTERINFO) {
byte uncompressed[] = _info.toByteArray();
byte compressed[] = DataHelper.compress(uncompressed);
_routerInfoCache = compressed;
len += compressed.length + 2;
if (_dbEntry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
_byteCache = _dbEntry.toByteArray();
} else if (_dbEntry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
byte uncompressed[] = _dbEntry.toByteArray();
_byteCache = DataHelper.compress(uncompressed);
len += 2;
}
len += _byteCache.length;
return len;
}
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if (_key == null) throw new I2NPMessageException("Invalid key");
if ( (_type != KEY_TYPE_LEASESET) && (_type != KEY_TYPE_ROUTERINFO) ) throw new I2NPMessageException("Invalid key type");
if ( (_type == KEY_TYPE_LEASESET) && (_leaseSet == null) ) throw new I2NPMessageException("Missing lease set");
if ( (_type == KEY_TYPE_ROUTERINFO) && (_info == null) ) throw new I2NPMessageException("Missing router info");
if (_dbEntry == null) throw new I2NPMessageException("Missing entry");
int type = _dbEntry.getType();
if (type != DatabaseEntry.KEY_TYPE_LEASESET && type != DatabaseEntry.KEY_TYPE_ROUTERINFO)
throw new I2NPMessageException("Invalid key type");
System.arraycopy(_key.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
// Use the hash of the DatabaseEntry
System.arraycopy(getKey().getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
byte type[] = DataHelper.toLong(1, _type);
out[curIndex++] = type[0];
out[curIndex++] = (byte) type;
byte tok[] = DataHelper.toLong(4, _replyToken);
System.arraycopy(tok, 0, out, curIndex, 4);
curIndex += 4;
@ -209,17 +187,14 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
curIndex += Hash.HASH_LENGTH;
}
if (_type == KEY_TYPE_LEASESET) {
// initialized in calculateWrittenLength
System.arraycopy(_leaseSetCache, 0, out, curIndex, _leaseSetCache.length);
curIndex += _leaseSetCache.length;
} else if (_type == KEY_TYPE_ROUTERINFO) {
byte len[] = DataHelper.toLong(2, _routerInfoCache.length);
// _byteCache initialized in calculateWrittenLength
if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
byte len[] = DataHelper.toLong(2, _byteCache.length);
out[curIndex++] = len[0];
out[curIndex++] = len[1];
System.arraycopy(_routerInfoCache, 0, out, curIndex, _routerInfoCache.length);
curIndex += _routerInfoCache.length;
}
System.arraycopy(_byteCache, 0, out, curIndex, _byteCache.length);
curIndex += _byteCache.length;
return curIndex;
}
@ -228,9 +203,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
@Override
public int hashCode() {
return DataHelper.hashCode(getKey()) +
DataHelper.hashCode(getLeaseSet()) +
DataHelper.hashCode(getRouterInfo()) +
getValueType() +
DataHelper.hashCode(_dbEntry) +
(int)getReplyToken() +
DataHelper.hashCode(getReplyTunnel()) +
DataHelper.hashCode(getReplyGateway());
@ -241,9 +214,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
if ( (object != null) && (object instanceof DatabaseStoreMessage) ) {
DatabaseStoreMessage msg = (DatabaseStoreMessage)object;
return DataHelper.eq(getKey(),msg.getKey()) &&
DataHelper.eq(getLeaseSet(),msg.getLeaseSet()) &&
DataHelper.eq(getRouterInfo(),msg.getRouterInfo()) &&
_type == msg.getValueType() &&
DataHelper.eq(_dbEntry,msg.getEntry()) &&
getReplyToken() == msg.getReplyToken() &&
DataHelper.eq(getReplyTunnel(), msg.getReplyTunnel()) &&
DataHelper.eq(getReplyGateway(), msg.getReplyGateway());
@ -259,9 +230,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
buf.append("\n\tExpiration: ").append(getMessageExpiration());
buf.append("\n\tUnique ID: ").append(getUniqueId());
buf.append("\n\tKey: ").append(getKey());
buf.append("\n\tValue Type: ").append(getValueType());
buf.append("\n\tRouter Info: ").append(getRouterInfo());
buf.append("\n\tLease Set: ").append(getLeaseSet());
buf.append("\n\tEntry: ").append(_dbEntry);
buf.append("\n\tReply token: ").append(getReplyToken());
buf.append("\n\tReply tunnel: ").append(getReplyTunnel());
buf.append("\n\tReply gateway: ").append(getReplyGateway());
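
For senders, the old setKey()/setLeaseSet()/setRouterInfo()/setValueType() calls collapse into a single setEntry(), and getKey() is now derived from the entry itself. A minimal usage sketch; the helper class and method names are illustrative only:

import net.i2p.I2PAppContext;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.i2np.DatabaseStoreMessage;

class DsmUsageSketch {
    /**
     * Old style: msg.setKey(ls.getDestination().calculateHash());
     *            msg.setLeaseSet(ls);   // implied setValueType(KEY_TYPE_LEASESET)
     * New style: the entry carries its own key and type.
     */
    static DatabaseStoreMessage buildStore(I2PAppContext ctx, LeaseSet ls) {
        DatabaseStoreMessage msg = new DatabaseStoreMessage(ctx);
        msg.setEntry(ls);            // a RouterInfo works the same way
        Hash key = msg.getKey();     // now delegates to DatabaseEntry.getHash()
        return msg;
    }
}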

View File

@ -16,6 +16,7 @@ import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -36,6 +37,7 @@ class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade {
_routers.put(info.getIdentity().getHash(), info);
}
public DatabaseEntry lookupLocally(Hash key) { return null; }
public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {}
public LeaseSet lookupLeaseSetLocally(Hash key) { return null; }
public void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {

View File

@ -13,6 +13,7 @@ import java.io.Writer;
import java.util.Collections;
import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -32,6 +33,11 @@ public abstract class NetworkDatabaseFacade implements Service {
*/
public abstract Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore);
/**
* @return RouterInfo, LeaseSet, or null
* @since 0.8.3
*/
public abstract DatabaseEntry lookupLocally(Hash key);
public abstract void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs);
public abstract LeaseSet lookupLeaseSetLocally(Hash key);
public abstract void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs);
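
Since lookupLocally() can hand back either concrete type, callers dispatch on getType() and cast, the same pattern the store-handling jobs in this commit use. A minimal sketch assuming the net.i2p.router package; the class and method here are illustrative only:

import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
import net.i2p.router.NetworkDatabaseFacade;

class LocalLookupSketch {
    /** describe whatever is stored locally under the given key */
    static String describe(NetworkDatabaseFacade netDb, Hash key) {
        DatabaseEntry entry = netDb.lookupLocally(key);   // RouterInfo, LeaseSet, or null
        if (entry == null)
            return "not found locally";
        if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
            return "router published at " + ((RouterInfo) entry).getPublished();
        if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
            return "leases good until " + ((LeaseSet) entry).getEarliestLeaseDate();
        return "unknown entry type " + entry.getType();
    }
}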

View File

@ -227,9 +227,8 @@ class OutboundClientMessageJobHelper {
clove.setExpiration(expiration);
clove.setId(ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE));
DatabaseStoreMessage msg = new DatabaseStoreMessage(ctx);
msg.setLeaseSet(replyLeaseSet);
msg.setEntry(replyLeaseSet);
msg.setMessageExpiration(expiration);
msg.setKey(replyLeaseSet.getDestination().calculateHash());
clove.setPayload(msg);
clove.setRecipientPublicKey(null);
clove.setRequestAck(false);

View File

@ -12,7 +12,7 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import net.i2p.data.DataStructure;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterIdentity;
@ -227,20 +227,19 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
return routerHashSet.contains(getContext().routerHash());
}
private void sendData(Hash key, DataStructure data, Hash toPeer, TunnelId replyTunnel) {
private void sendData(Hash key, DatabaseEntry data, Hash toPeer, TunnelId replyTunnel) {
if (!key.equals(data.getHash())) {
_log.error("Hash mismatch HDLMJ");
return;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending data matching key " + key.toBase64() + " to peer " + toPeer.toBase64()
+ " tunnel " + replyTunnel);
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(key);
if (data instanceof LeaseSet) {
msg.setLeaseSet((LeaseSet)data);
msg.setValueType(DatabaseStoreMessage.KEY_TYPE_LEASESET);
if (data.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
getContext().statManager().addRateData("netDb.lookupsMatchedLeaseSet", 1, 0);
} else if (data instanceof RouterInfo) {
msg.setRouterInfo((RouterInfo)data);
msg.setValueType(DatabaseStoreMessage.KEY_TYPE_ROUTERINFO);
}
msg.setEntry(data);
getContext().statManager().addRateData("netDb.lookupsMatched", 1, 0);
getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0);
sendMessage(msg, toPeer, replyTunnel);

View File

@ -10,9 +10,11 @@ package net.i2p.router.networkdb;
import java.util.Date;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterIdentity;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.router.JobImpl;
@ -59,16 +61,17 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
String invalidMessage = null;
boolean wasNew = false;
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
DatabaseEntry entry = _message.getEntry();
if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
getContext().statManager().addRateData("netDb.storeLeaseSetHandled", 1, 0);
try {
LeaseSet ls = _message.getLeaseSet();
LeaseSet ls = (LeaseSet) entry;
// mark it as something we received, so we'll answer queries
// for it. this flag does NOT get set on entries that we
// receive in response to our own lookups.
ls.setReceivedAsPublished(true);
LeaseSet match = getContext().netDb().store(_message.getKey(), _message.getLeaseSet());
LeaseSet match = getContext().netDb().store(_message.getKey(), ls);
if (match == null) {
wasNew = true;
} else {
@ -78,13 +81,14 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
} catch (IllegalArgumentException iae) {
invalidMessage = iae.getMessage();
}
} else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
} else if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
RouterInfo ri = (RouterInfo) entry;
getContext().statManager().addRateData("netDb.storeRouterInfoHandled", 1, 0);
if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of router " + _message.getKey() + " with publishDate of "
+ new Date(_message.getRouterInfo().getPublished()));
+ new Date(ri.getPublished()));
try {
Object match = getContext().netDb().store(_message.getKey(), _message.getRouterInfo());
Object match = getContext().netDb().store(_message.getKey(), ri);
wasNew = (null == match);
getContext().profileManager().heardAbout(_message.getKey());
} catch (IllegalArgumentException iae) {
@ -92,7 +96,7 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
}
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType()
_log.error("Invalid DatabaseStoreMessage data type - " + entry.getType()
+ ": " + _message);
}

View File

@ -8,21 +8,27 @@ package net.i2p.router.networkdb.kademlia;
*
*/
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import net.i2p.data.DataStructure;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
public interface DataStore {
public boolean isInitialized();
public boolean isKnown(Hash key);
public DataStructure get(Hash key);
public DataStructure get(Hash key, boolean persist);
public boolean put(Hash key, DataStructure data);
public boolean put(Hash key, DataStructure data, boolean persist);
public DataStructure remove(Hash key);
public DataStructure remove(Hash key, boolean persist);
public DatabaseEntry get(Hash key);
public DatabaseEntry get(Hash key, boolean persist);
public boolean put(Hash key, DatabaseEntry data);
public boolean put(Hash key, DatabaseEntry data, boolean persist);
public DatabaseEntry remove(Hash key);
public DatabaseEntry remove(Hash key, boolean persist);
public Set<Hash> getKeys();
/** @since 0.8.3 */
public Collection<DatabaseEntry> getEntries();
/** @since 0.8.3 */
public Set<Map.Entry<Hash, DatabaseEntry>> getMapEntries();
public void stop();
public void restart();
public void rescan();
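
The new getEntries() and getMapEntries() accessors let callers walk the store in one pass instead of fetching every key individually, which is how the rewritten getAllRouters() and the RouterInfo-collecting loop in FloodfillNetworkDatabaseFacade below use them. A minimal sketch; the helper class is illustrative:

import java.util.ArrayList;
import java.util.List;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.RouterInfo;

class DataStoreScanSketch {
    /** collect all locally known RouterInfos in a single pass over the store */
    static List<RouterInfo> knownRouters(DataStore ds) {
        List<RouterInfo> rv = new ArrayList<RouterInfo>();
        for (DatabaseEntry e : ds.getEntries()) {
            if (e.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
                rv.add((RouterInfo) e);
        }
        return rv;
    }
}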

View File

@ -12,6 +12,7 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.router.JobImpl;
@ -61,8 +62,8 @@ class ExpireLeasesJob extends JobImpl {
Set toExpire = new HashSet(128);
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Object obj = _facade.getDataStore().get(key);
if (obj instanceof LeaseSet) {
DatabaseEntry obj = _facade.getDataStore().get(key);
if (obj.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
LeaseSet ls = (LeaseSet)obj;
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR))
toExpire.add(key);

View File

@ -1,5 +1,8 @@
package net.i2p.router.networkdb.kademlia;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
@ -61,14 +64,15 @@ class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
// We do it here first to make sure it is in the DB before
// runJob() and search.success() is called???
// Should we just pass the DataStructure directly back to somebody?
if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
// Since HFDSMJ wants to setReceivedAsPublished(), we have to
// set a flag saying this was really the result of a query,
// so don't do that.
dsm.getLeaseSet().setReceivedAsReply();
getContext().netDb().store(dsm.getKey(), dsm.getLeaseSet());
LeaseSet ls = (LeaseSet) dsm.getEntry();
ls.setReceivedAsReply();
getContext().netDb().store(dsm.getKey(), ls);
} else {
getContext().netDb().store(dsm.getKey(), dsm.getRouterInfo());
getContext().netDb().store(dsm.getKey(), (RouterInfo) dsm.getEntry());
}
} catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.WARN))

View File

@ -182,8 +182,7 @@ public class FloodSearchJob extends JobImpl {
_search = job;
}
public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
if (getContext().netDb().lookupLocally(_search.getKey()) != null) {
_search.success();
} else {
int remaining = _search.getLookupsRemaining();

View File

@ -7,8 +7,8 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -93,11 +93,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
}
@Override
public void sendStore(Hash key, DataStructure ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
// if we are a part of the floodfill netDb, don't send out our own leaseSets as part
// of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
// perhaps statistically adjust this so we are the source every 1/N times... or something.
if (floodfillEnabled() && (ds instanceof RouterInfo)) {
if (floodfillEnabled() && (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
flood(ds);
if (onSuccess != null)
_context.jobQueue().addJob(onSuccess);
@ -129,12 +129,8 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
* We do this to implement Kademlia within the floodfills, i.e.
* we flood to those closest to the key.
*/
public void flood(DataStructure ds) {
Hash key;
if (ds instanceof LeaseSet)
key = ((LeaseSet)ds).getDestination().calculateHash();
else
key = ((RouterInfo)ds).getIdentity().calculateHash();
public void flood(DatabaseEntry ds) {
Hash key = ds.getHash();
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector();
List peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
@ -151,12 +147,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
if (peer.equals(_context.routerHash()))
continue;
DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
if (ds instanceof LeaseSet) {
msg.setLeaseSet((LeaseSet)ds);
} else {
msg.setRouterInfo((RouterInfo)ds);
}
msg.setKey(key);
msg.setEntry(ds);
msg.setReplyGateway(null);
msg.setReplyToken(0);
msg.setReplyTunnel(null);
@ -242,13 +233,9 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
List<RouterInfo> rv = new ArrayList();
DataStore ds = getDataStore();
if (ds != null) {
Set keys = ds.getKeys();
if (keys != null) {
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Object o = ds.get((Hash)iter.next());
if (o instanceof RouterInfo)
rv.add((RouterInfo)o);
}
for (DatabaseEntry o : ds.getEntries()) {
if (o.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
rv.add((RouterInfo)o);
}
}
return rv;
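
flood() no longer needs to know which concrete type it is flooding: the per-type key derivation it used to do inline is now behind DatabaseEntry.getHash(). A side-by-side sketch of the two styles (the helper class is illustrative; the old-style branch mirrors the code this commit removes):

import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;

class FloodKeySketch {
    /** what flood() did before: pick the netDb key per concrete type */
    static Hash oldStyleKey(DatabaseEntry ds) {
        if (ds instanceof LeaseSet)
            return ((LeaseSet) ds).getDestination().calculateHash();
        return ((RouterInfo) ds).getIdentity().calculateHash();
    }

    /** what flood() does now: the entry knows its own key */
    static Hash newStyleKey(DatabaseEntry ds) {
        return ds.getHash();
    }
}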

View File

@ -12,7 +12,7 @@ import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Set;
import net.i2p.data.DataStructure;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -30,7 +30,7 @@ class FloodfillStoreJob extends StoreJob {
* Send a data structure to the floodfills
*
*/
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) {
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DatabaseEntry data, Job onSuccess, Job onFailure, long timeoutMs) {
this(context, facade, key, data, onSuccess, onFailure, timeoutMs, null);
}
@ -38,7 +38,7 @@ class FloodfillStoreJob extends StoreJob {
* @param toSkip set of peer hashes of people we dont want to send the data to (e.g. we
* already know they have it). This can be null.
*/
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DatabaseEntry data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
super(context, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip);
_facade = facade;
}
@ -63,15 +63,12 @@ class FloodfillStoreJob extends StoreJob {
}
// Get the time stamp from the data we sent, so the Verify job can make sure that
// it finds something stamped with that time or newer.
long published = 0;
DataStructure data = _state.getData();
boolean isRouterInfo = data instanceof RouterInfo;
DatabaseEntry data = _state.getData();
boolean isRouterInfo = data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO;
long published = data.getDate();
if (isRouterInfo) {
published = ((RouterInfo) data).getPublished();
// Temporarily disable
return;
} else if (data instanceof LeaseSet) {
published = ((LeaseSet) data).getEarliestLeaseDate();
}
// we should always have exactly one successful entry
Hash sentTo = null;

View File

@ -4,7 +4,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
import net.i2p.data.DataStructure;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseLookupMessage;
@ -201,10 +201,7 @@ public class FloodfillVerifyStoreJob extends JobImpl {
// Verify it's as recent as the one we sent
boolean success = false;
DatabaseStoreMessage dsm = (DatabaseStoreMessage)_message;
if (_isRouterInfo && dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO)
success = dsm.getRouterInfo().getPublished() >= _published;
else if ((!_isRouterInfo) && dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
success = dsm.getLeaseSet().getEarliestLeaseDate() >= _published;
success = dsm.getEntry().getDate() >= _published;
if (success) {
// store ok, w00t!
getContext().profileManager().dbLookupSuccessful(_target, delay);
@ -218,7 +215,7 @@ public class FloodfillVerifyStoreJob extends JobImpl {
if (_log.shouldLog(Log.WARN))
_log.warn("Verify failed (older) for " + _key);
if (_log.shouldLog(Log.INFO))
_log.info("Rcvd older lease: " + dsm.getLeaseSet());
_log.info("Rcvd older lease: " + dsm.getEntry());
} else if (_message instanceof DatabaseSearchReplyMessage) {
// assume 0 old, all new, 0 invalid, 0 dup
getContext().profileManager().dbLookupReply(_target, 0,
@ -245,11 +242,7 @@ public class FloodfillVerifyStoreJob extends JobImpl {
* So at least we'll try THREE ffs round-robin if things continue to fail...
*/
private void resend() {
DataStructure ds;
if (_isRouterInfo)
ds = _facade.lookupRouterInfoLocally(_key);
else
ds = _facade.lookupLeaseSetLocally(_key);
DatabaseEntry ds = _facade.lookupLocally(_key);
if (ds != null) {
Set<Hash> toSkip = new HashSet(2);
if (_sentTo != null)

View File

@ -57,9 +57,7 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLooku
// that would increment the netDb.lookupsHandled and netDb.lookupsMatched stats
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
RouterInfo me = getContext().router().getRouterInfo();
msg.setKey(me.getIdentity().getHash());
msg.setRouterInfo(me);
msg.setValueType(DatabaseStoreMessage.KEY_TYPE_ROUTERINFO);
msg.setEntry(me);
sendMessage(msg, toPeer, replyTunnel);
}
}

View File

@ -11,6 +11,7 @@ package net.i2p.router.networkdb.kademlia;
import java.util.Date;
import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterIdentity;
@ -55,7 +56,8 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
boolean wasNew = false;
RouterInfo prevNetDb = null;
Hash key = _message.getKey();
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
DatabaseEntry entry = _message.getEntry();
if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
getContext().statManager().addRateData("netDb.storeLeaseSetHandled", 1, 0);
if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of leaseset " + _message);
@ -75,7 +77,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
throw new IllegalArgumentException("Peer attempted to store local leaseSet: " +
key.toBase64().substring(0, 4));
}
LeaseSet ls = _message.getLeaseSet();
LeaseSet ls = (LeaseSet) entry;
//boolean oldrar = ls.getReceivedAsReply();
//boolean oldrap = ls.getReceivedAsPublished();
// If this was received as a response to a query,
@ -91,10 +93,10 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
//boolean rap = ls.getReceivedAsPublished();
//if (_log.shouldLog(Log.INFO))
// _log.info("oldrap? " + oldrap + " oldrar? " + oldrar + " newrap? " + rap);
LeaseSet match = getContext().netDb().store(key, _message.getLeaseSet());
LeaseSet match = getContext().netDb().store(key, ls);
if (match == null) {
wasNew = true;
} else if (match.getEarliestLeaseDate() < _message.getLeaseSet().getEarliestLeaseDate()) {
} else if (match.getEarliestLeaseDate() < ls.getEarliestLeaseDate()) {
wasNew = true;
// If it is in our keyspace and we are talking to it
@ -117,11 +119,12 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
} catch (IllegalArgumentException iae) {
invalidMessage = iae.getMessage();
}
} else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
} else if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
RouterInfo ri = (RouterInfo) entry;
getContext().statManager().addRateData("netDb.storeRouterInfoHandled", 1, 0);
if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of router " + key + " with publishDate of "
+ new Date(_message.getRouterInfo().getPublished()));
+ new Date(ri.getPublished()));
try {
// Never store our RouterInfo received from somebody else.
// This generally happens from a FloodfillVerifyStoreJob.
@ -132,8 +135,8 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// throw rather than return, so that we send the ack below (prevent easy attack)
throw new IllegalArgumentException("Peer attempted to store our RouterInfo");
}
prevNetDb = getContext().netDb().store(key, _message.getRouterInfo());
wasNew = ((null == prevNetDb) || (prevNetDb.getPublished() < _message.getRouterInfo().getPublished()));
prevNetDb = getContext().netDb().store(key, ri);
wasNew = ((null == prevNetDb) || (prevNetDb.getPublished() < ri.getPublished()));
// Check new routerinfo address against blocklist
if (wasNew) {
if (prevNetDb == null) {
@ -143,7 +146,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
_log.warn("Blocklisting new peer " + key);
} else {
Set oldAddr = prevNetDb.getAddresses();
Set newAddr = _message.getRouterInfo().getAddresses();
Set newAddr = ri.getAddresses();
if (newAddr != null && (!newAddr.equals(oldAddr)) &&
(!getContext().shitlist().isShitlistedForever(key)) &&
getContext().blocklist().isBlocklisted(key) &&
@ -157,7 +160,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
}
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType()
_log.error("Invalid DatabaseStoreMessage data type - " + entry.getType()
+ ": " + _message);
}
@ -198,12 +201,9 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
return;
}
long floodBegin = System.currentTimeMillis();
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
_facade.flood(_message.getLeaseSet());
_facade.flood(_message.getEntry());
// ERR: see comment in HandleDatabaseLookupMessageJob regarding hidden mode
//else if (!_message.getRouterInfo().isHidden())
else
_facade.flood(_message.getRouterInfo());
long floodEnd = System.currentTimeMillis();
getContext().statManager().addRateData("netDb.storeFloodNew", floodEnd-floodBegin, 0);
} else {

View File

@ -24,8 +24,8 @@ import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataHelper;
import net.i2p.data.DataStructure;
import net.i2p.data.Destination;
import net.i2p.data.Hash;
import net.i2p.data.Lease;
@ -235,11 +235,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public void startup() {
_log.info("Starting up the kademlia network database");
RouterInfo ri = _context.router().getRouterInfo();
String dbDir = _context.router().getConfigSetting(PROP_DB_DIR);
if (dbDir == null) {
_log.info("No DB dir specified [" + PROP_DB_DIR + "], using [" + DEFAULT_DB_DIR + "]");
dbDir = DEFAULT_DB_DIR;
}
String dbDir = _context.getProperty(PROP_DB_DIR, DEFAULT_DB_DIR);
String enforce = _context.getProperty(PROP_ENFORCE_NETID);
if (enforce != null)
_enforceNetId = Boolean.valueOf(enforce).booleanValue();
@ -247,7 +243,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
_enforceNetId = DEFAULT_ENFORCE_NETID;
_kb = new KBucketSet(_context, ri.getIdentity().getHash());
_ds = new PersistentDataStore(_context, dbDir, this);
try {
_ds = new PersistentDataStore(_context, dbDir, this);
} catch (IOException ioe) {
throw new RuntimeException("Unable to initialize netdb storage", ioe);
}
//_ds = new TransientDataStore();
// _exploreKeys = new HashSet(64);
_dbDir = dbDir;
@ -350,21 +350,11 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
/** get the hashes for all known routers */
public Set<Hash> getAllRouters() {
if (!_initialized) return Collections.EMPTY_SET;
Set<Hash> keys = _ds.getKeys();
Set<Hash> rv = new HashSet(keys.size());
if (_log.shouldLog(Log.DEBUG))
_log.debug("getAllRouters(): # keys in the datastore: " + keys.size());
for (Hash key : keys) {
DataStructure ds = _ds.get(key);
if (ds == null) {
if (_log.shouldLog(Log.INFO))
_log.info("Selected hash " + key.toBase64() + " is not stored locally");
} else if ( !(ds instanceof RouterInfo) ) {
// leaseSet
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("getAllRouters(): key is router: " + key.toBase64());
rv.add(key);
Set<Map.Entry<Hash, DatabaseEntry>> entries = _ds.getMapEntries();
Set<Hash> rv = new HashSet(entries.size());
for (Map.Entry<Hash, DatabaseEntry> entry : entries) {
if (entry.getValue().getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
rv.add(entry.getKey());
}
}
return rv;
@ -383,8 +373,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public int size() { return _count; }
public void add(Hash entry) {
if (_ds == null) return;
Object o = _ds.get(entry);
if (o instanceof RouterInfo)
DatabaseEntry o = _ds.get(entry);
if (o != null && o.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
_count++;
}
}
@ -400,12 +390,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public int getKnownLeaseSets() {
if (_ds == null) return 0;
//return _ds.countLeaseSets();
Set<Hash> keys = _ds.getKeys();
int rv = 0;
for (Hash key : keys) {
DataStructure ds = _ds.get(key);
if (ds != null &&
ds instanceof LeaseSet &&
for (DatabaseEntry ds : _ds.getEntries()) {
if (ds.getType() == DatabaseEntry.KEY_TYPE_LEASESET &&
((LeaseSet)ds).getReceivedAsPublished())
rv++;
}
@ -418,8 +405,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public int size() { return _count; }
public void add(Hash entry) {
if (_ds == null) return;
Object o = _ds.get(entry);
if (o instanceof LeaseSet)
DatabaseEntry o = _ds.get(entry);
if (o != null && o.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
_count++;
}
}
@ -434,6 +421,32 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
return _kb.size();
}
/**
* @return RouterInfo, LeaseSet, or null, validated
* @since 0.8.3
*/
public DatabaseEntry lookupLocally(Hash key) {
if (!_initialized)
return null;
DatabaseEntry rv = _ds.get(key);
if (rv == null)
return null;
if (rv.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
LeaseSet ls = (LeaseSet)rv;
if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR))
return rv;
else
fail(key);
} else if (rv.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
try {
if (validate(key, (RouterInfo)rv) == null)
return rv;
} catch (IllegalArgumentException iae) {}
fail(key);
}
return null;
}
public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {
if (!_initialized) return;
LeaseSet ls = lookupLeaseSetLocally(key);
@ -453,9 +466,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public LeaseSet lookupLeaseSetLocally(Hash key) {
if (!_initialized) return null;
if (_ds.isKnown(key)) {
DataStructure ds = _ds.get(key);
if (ds instanceof LeaseSet) {
DatabaseEntry ds = _ds.get(key);
if (ds != null) {
if (ds.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
LeaseSet ls = (LeaseSet)ds;
if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
return ls;
@ -489,9 +502,9 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public RouterInfo lookupRouterInfoLocally(Hash key) {
if (!_initialized) return null;
DataStructure ds = _ds.get(key);
DatabaseEntry ds = _ds.get(key);
if (ds != null) {
if (ds instanceof RouterInfo) {
if (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
// more aggressive than perhaps is necessary, but makes sure we
// drop old references that we had accepted on startup (since
// startup allows some lax rules).
@ -610,6 +623,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* Determine whether this leaseSet will be accepted as valid and current
* given what we know now.
*
* TODO this is called several times, only check the key and signature once
*
* @return reason why the entry is not valid, or null if it is valid
*/
String validate(Hash key, LeaseSet leaseSet) {
@ -692,6 +707,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
* Determine whether this routerInfo will be accepted as valid and current
* given what we know now.
*
* TODO this is called several times, only check the key and signature once
*/
String validate(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
long now = _context.clock().now();
@ -807,30 +823,26 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public void fail(Hash dbEntry) {
if (!_initialized) return;
boolean isRouterInfo = false;
Object o = _ds.get(dbEntry);
if (o instanceof RouterInfo)
isRouterInfo = true;
if (isRouterInfo) {
lookupBeforeDropping(dbEntry, (RouterInfo)o);
return;
} else {
// we always drop leaseSets that are failed [timed out],
// regardless of how many routers we have. this is called on a lease if
// it has expired *or* its tunnels are failing and we want to see if there
// are any updates
if (_log.shouldLog(Log.INFO))
_log.info("Dropping a lease: " + dbEntry);
}
DatabaseEntry o = _ds.get(dbEntry);
if (o == null) {
// if we dont know the key, lets make sure it isn't a now-dead peer
_kb.remove(dbEntry);
_context.peerManager().removeCapabilities(dbEntry);
// if we dont know the key, lets make sure it isn't a now-dead peer
return;
}
_ds.remove(dbEntry, isRouterInfo);
if (o.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
lookupBeforeDropping(dbEntry, (RouterInfo)o);
return;
}
// we always drop leaseSets that are failed [timed out],
// regardless of how many routers we have. this is called on a lease if
// it has expired *or* its tunnels are failing and we want to see if there
// are any updates
if (_log.shouldLog(Log.INFO))
_log.info("Dropping a lease: " + dbEntry);
_ds.remove(dbEntry, false);
}
/** don't use directly - see F.N.D.F. override */
@ -852,7 +864,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public void unpublish(LeaseSet localLeaseSet) {
if (!_initialized) return;
Hash h = localLeaseSet.getDestination().calculateHash();
DataStructure data = _ds.remove(h);
DatabaseEntry data = _ds.remove(h);
if (data == null) {
if (_log.shouldLog(Log.WARN))
@ -906,8 +918,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
Set keys = getDataStore().getKeys();
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Object o = getDataStore().get(key);
if (o instanceof LeaseSet)
DatabaseEntry o = getDataStore().get(key);
if (o.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
leases.add(o);
}
return leases;
@ -920,8 +932,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
Set keys = getDataStore().getKeys();
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Object o = getDataStore().get(key);
if (o instanceof RouterInfo)
DatabaseEntry o = getDataStore().get(key);
if (o.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
routers.add(o);
}
return routers;
@ -953,7 +965,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
}
/** unused (overridden in FNDF) */
public void sendStore(Hash key, DataStructure ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
if ( (ds == null) || (key == null) ) {
if (onFailure != null)
_context.jobQueue().addJob(onFailure);

View File

@ -18,8 +18,8 @@ import java.util.Map;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -38,19 +38,22 @@ import net.i2p.util.SecureFileOutputStream;
*
*/
class PersistentDataStore extends TransientDataStore {
private Log _log;
private String _dbDir;
private KademliaNetworkDatabaseFacade _facade;
private Writer _writer;
private ReadJob _readJob;
private final Log _log;
private final File _dbDir;
private final KademliaNetworkDatabaseFacade _facade;
private final Writer _writer;
private final ReadJob _readJob;
private boolean _initialized;
private final static int READ_DELAY = 60*1000;
public PersistentDataStore(RouterContext ctx, String dbDir, KademliaNetworkDatabaseFacade facade) {
/**
* @param dbDir relative path
*/
public PersistentDataStore(RouterContext ctx, String dbDir, KademliaNetworkDatabaseFacade facade) throws IOException {
super(ctx);
_log = ctx.logManager().getLog(PersistentDataStore.class);
_dbDir = dbDir;
_dbDir = getDbDir(dbDir);
_facade = facade;
_readJob = new ReadJob();
_context.jobQueue().addJob(_readJob);
@ -78,7 +81,6 @@ class PersistentDataStore extends TransientDataStore {
@Override
public void restart() {
super.restart();
_dbDir = _facade.getDbDir();
}
@Override
@ -88,7 +90,7 @@ class PersistentDataStore extends TransientDataStore {
}
@Override
public DataStructure get(Hash key) {
public DatabaseEntry get(Hash key) {
return get(key, true);
}
@ -97,8 +99,8 @@ class PersistentDataStore extends TransientDataStore {
* @param persist if false, call super only, don't access disk
*/
@Override
public DataStructure get(Hash key, boolean persist) {
DataStructure rv = super.get(key);
public DatabaseEntry get(Hash key, boolean persist) {
DatabaseEntry rv = super.get(key);
/*****
if (rv != null || !persist)
return rv;
@ -113,7 +115,7 @@ class PersistentDataStore extends TransientDataStore {
}
@Override
public DataStructure remove(Hash key) {
public DatabaseEntry remove(Hash key) {
return remove(key, true);
}
@ -121,7 +123,7 @@ class PersistentDataStore extends TransientDataStore {
* @param persist if false, call super only, don't access disk
*/
@Override
public DataStructure remove(Hash key, boolean persist) {
public DatabaseEntry remove(Hash key, boolean persist) {
if (persist) {
_writer.remove(key);
_context.jobQueue().addJob(new RemoveJob(key));
@ -130,7 +132,7 @@ class PersistentDataStore extends TransientDataStore {
}
@Override
public boolean put(Hash key, DataStructure data) {
public boolean put(Hash key, DatabaseEntry data) {
return put(key, data, true);
}
@ -139,11 +141,11 @@ class PersistentDataStore extends TransientDataStore {
* @return success
*/
@Override
public boolean put(Hash key, DataStructure data, boolean persist) {
public boolean put(Hash key, DatabaseEntry data, boolean persist) {
if ( (data == null) || (key == null) ) return false;
boolean rv = super.put(key, data);
// Don't bother writing LeaseSets to disk
if (rv && persist && data instanceof RouterInfo)
if (rv && persist && data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
_writer.queue(key, data);
return rv;
}
@ -159,8 +161,7 @@ class PersistentDataStore extends TransientDataStore {
if (_log.shouldLog(Log.INFO))
_log.info("Removing key " + _key /* , getAddedBy() */);
try {
File dbDir = getDbDir();
removeFile(_key, dbDir);
removeFile(_key, _dbDir);
} catch (IOException ioe) {
_log.error("Error removing key " + _key, ioe);
}
@ -179,10 +180,10 @@ class PersistentDataStore extends TransientDataStore {
* We store a reference to the data here too,
* rather than simply pull it from super.get(), because
* we will soon have to implement a scheme for keeping only
* a subset of all DataStructures in memory and keeping the rest on disk.
* a subset of all DatabaseEntrys in memory and keeping the rest on disk.
*/
private class Writer implements Runnable {
private final Map<Hash, DataStructure>_keys;
private final Map<Hash, DatabaseEntry>_keys;
private final Object _waitLock;
private volatile boolean _quit;
@ -191,7 +192,7 @@ class PersistentDataStore extends TransientDataStore {
_waitLock = new Object();
}
public void queue(Hash key, DataStructure data) {
public void queue(Hash key, DatabaseEntry data) {
int pending = _keys.size();
boolean exists = (null != _keys.put(key, data));
if (exists)
@ -200,7 +201,7 @@ class PersistentDataStore extends TransientDataStore {
}
/** check to see if it's in the write queue */
public DataStructure get(Hash key) {
public DatabaseEntry get(Hash key) {
return _keys.get(key);
}
@ -211,16 +212,16 @@ class PersistentDataStore extends TransientDataStore {
public void run() {
_quit = false;
Hash key = null;
DataStructure data = null;
DatabaseEntry data = null;
int count = 0;
int lastCount = 0;
long startTime = 0;
while (true) {
// get a new iterator every time to get a random entry without
// having concurrency issues or copying to a List or Array
Iterator<Map.Entry<Hash, DataStructure>> iter = _keys.entrySet().iterator();
Iterator<Map.Entry<Hash, DatabaseEntry>> iter = _keys.entrySet().iterator();
try {
Map.Entry<Hash, DataStructure> entry = iter.next();
Map.Entry<Hash, DatabaseEntry> entry = iter.next();
key = entry.getKey();
data = entry.getValue();
iter.remove();
@ -235,7 +236,10 @@ class PersistentDataStore extends TransientDataStore {
if (key != null) {
if (data != null) {
write(key, data);
// synch with the reader job
synchronized (_dbDir) {
write(key, data);
}
data = null;
}
key = null;
@ -270,23 +274,22 @@ class PersistentDataStore extends TransientDataStore {
}
}
private void write(Hash key, DataStructure data) {
private void write(Hash key, DatabaseEntry data) {
if (_log.shouldLog(Log.INFO))
_log.info("Writing key " + key);
FileOutputStream fos = null;
File dbFile = null;
try {
String filename = null;
File dbDir = getDbDir();
if (data instanceof LeaseSet)
if (data.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
filename = getLeaseSetName(key);
else if (data instanceof RouterInfo)
else if (data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
filename = getRouterInfoName(key);
else
throw new IOException("We don't know how to write objects of type " + data.getClass().getName());
dbFile = new File(dbDir, filename);
dbFile = new File(_dbDir, filename);
long dataPublishDate = getPublishDate(data);
if (dbFile.lastModified() < dataPublishDate) {
// our filesystem is out of date, lets replace it
@ -312,27 +315,33 @@ class PersistentDataStore extends TransientDataStore {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
private long getPublishDate(DataStructure data) {
if (data instanceof RouterInfo) {
return ((RouterInfo)data).getPublished();
} else if (data instanceof LeaseSet) {
return ((LeaseSet)data).getEarliestLeaseDate();
} else {
return -1;
}
private long getPublishDate(DatabaseEntry data) {
return data.getDate();
}
/** This is only for manual reseeding? Why bother every 60 sec??? */
private class ReadJob extends JobImpl {
private boolean _alreadyWarned;
private long _lastModified;
public ReadJob() {
super(PersistentDataStore.this._context);
_alreadyWarned = false;
}
public String getName() { return "DB Read Job"; }
public void runJob() {
_log.info("Rereading new files");
readFiles();
// check directory mod time to save a lot of object churn in scanning all the file names
long lastMod = _dbDir.lastModified();
if (lastMod > _lastModified) {
_lastModified = lastMod;
_log.info("Rereading new files");
// synch with the writer job
synchronized (_dbDir) {
readFiles();
}
}
requeue(READ_DELAY);
}
@ -342,9 +351,8 @@ class PersistentDataStore extends TransientDataStore {
private void readFiles() {
int routerCount = 0;
try {
File dbDir = getDbDir();
File routerInfoFiles[] = dbDir.listFiles(RouterInfoFilter.getInstance());
File routerInfoFiles[] = _dbDir.listFiles(RouterInfoFilter.getInstance());
if (routerInfoFiles != null) {
routerCount += routerInfoFiles.length;
if (routerInfoFiles.length > 5)
@ -359,9 +367,6 @@ class PersistentDataStore extends TransientDataStore {
}
}
}
} catch (IOException ioe) {
_log.error("Error reading files in the db dir", ioe);
}
if (!_alreadyWarned) {
ReseedChecker.checkReseed(_context, routerCount);
@ -383,9 +388,9 @@ class PersistentDataStore extends TransientDataStore {
private boolean shouldRead() {
// persist = false to call only super.get()
DataStructure data = get(_key, false);
DatabaseEntry data = get(_key, false);
if (data == null) return true;
if (data instanceof RouterInfo) {
if (data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
long knownDate = ((RouterInfo)data).getPublished();
long fileDate = _routerFile.lastModified();
if (fileDate > knownDate)
@ -441,8 +446,8 @@ class PersistentDataStore extends TransientDataStore {
}
private File getDbDir() throws IOException {
File f = new SecureDirectory(_context.getRouterDir(), _dbDir);
private File getDbDir(String dbDir) throws IOException {
File f = new SecureDirectory(_context.getRouterDir(), dbDir);
if (!f.exists()) {
boolean created = f.mkdirs();
if (!created)

View File

@ -14,8 +14,8 @@ import java.util.Iterator;
import java.util.List;
import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataHelper;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -293,12 +293,12 @@ class SearchJob extends JobImpl {
attempted.addAll(closestHashes);
for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
DataStructure ds = _facade.getDataStore().get(peer);
DatabaseEntry ds = _facade.getDataStore().get(peer);
if (ds == null) {
if (_log.shouldLog(Log.INFO))
_log.info("Next closest peer " + peer + " was only recently referred to us, sending a search for them");
getContext().netDb().lookupRouterInfo(peer, null, null, _timeoutMs);
} else if (!(ds instanceof RouterInfo)) {
} else if (!(ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": Error selecting closest hash that wasnt a router! "
+ peer + " : " + ds.getClass().getName());
@ -635,7 +635,7 @@ class SearchJob extends JobImpl {
*
*/
private void resend() {
DataStructure ds = _facade.lookupLeaseSetLocally(_state.getTarget());
DatabaseEntry ds = _facade.lookupLeaseSetLocally(_state.getTarget());
if (ds == null) {
if (SHOULD_RESEND_ROUTERINFO) {
ds = _facade.lookupRouterInfoLocally(_state.getTarget());
@ -665,8 +665,7 @@ class SearchJob extends JobImpl {
*/
private boolean resend(RouterInfo toPeer, LeaseSet ls) {
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(ls.getDestination().calculateHash());
msg.setLeaseSet(ls);
msg.setEntry(ls);
msg.setMessageExpiration(getContext().clock().now() + RESEND_TIMEOUT);
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundTunnel();

View File

@ -2,7 +2,9 @@ package net.i2p.router.networkdb.kademlia;
import java.util.Date;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
@ -78,22 +80,23 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
long timeToReply = _state.dataFound(_peer);
DatabaseStoreMessage msg = (DatabaseStoreMessage)message;
if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
DatabaseEntry entry = msg.getEntry();
if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
try {
_facade.store(msg.getKey(), msg.getLeaseSet());
_facade.store(msg.getKey(), (LeaseSet) entry);
getContext().profileManager().dbLookupSuccessful(_peer, timeToReply);
} catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.ERROR))
_log.warn("Peer " + _peer + " sent us an invalid leaseSet: " + iae.getMessage());
getContext().profileManager().dbLookupReply(_peer, 0, 0, 1, 0, timeToReply);
}
} else if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
} else if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": dbStore received on search containing router "
+ msg.getKey() + " with publishDate of "
+ new Date(msg.getRouterInfo().getPublished()));
+ new Date(entry.getDate()));
try {
_facade.store(msg.getKey(), msg.getRouterInfo());
_facade.store(msg.getKey(), (RouterInfo) entry);
getContext().profileManager().dbLookupSuccessful(_peer, timeToReply);
} catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.ERROR))
@ -102,7 +105,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
}
} else {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": Unknown db store type?!@ " + msg.getValueType());
_log.error(getJobId() + ": Unknown db store type?!@ " + entry.getType());
}
} else if (message instanceof DatabaseSearchReplyMessage) {
_job.replyFound((DatabaseSearchReplyMessage)message, _peer);

View File

@ -13,7 +13,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Set;
import net.i2p.data.DataStructure;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -61,7 +61,7 @@ class StoreJob extends JobImpl {
*
*/
public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key,
DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) {
DatabaseEntry data, Job onSuccess, Job onFailure, long timeoutMs) {
this(context, facade, key, data, onSuccess, onFailure, timeoutMs, null);
}
@ -70,7 +70,7 @@ class StoreJob extends JobImpl {
* already know they have it). This can be null.
*/
public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key,
DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
DatabaseEntry data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
super(context);
_log = context.logManager().getLog(StoreJob.class);
_facade = facade;
@ -167,8 +167,8 @@ class StoreJob extends JobImpl {
_log.info(getJobId() + ": Continue sending key " + _state.getTarget() + " after " + _state.getAttempted().size() + " tries to " + closestHashes);
for (Iterator<Hash> iter = closestHashes.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
DataStructure ds = _facade.getDataStore().get(peer);
if ( (ds == null) || !(ds instanceof RouterInfo) ) {
DatabaseEntry ds = _facade.getDataStore().get(peer);
if ( (ds == null) || !(ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) ) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Error selecting closest hash that wasnt a router! " + peer + " : " + ds);
_state.addSkipped(peer);
@ -255,16 +255,19 @@ class StoreJob extends JobImpl {
*
*/
private void sendStore(RouterInfo router, int responseTime) {
if (!_state.getTarget().equals(_state.getData().getHash())) {
_log.error("Hash mismatch StoreJob");
return;
}
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(_state.getTarget());
if (_state.getData() instanceof RouterInfo) {
msg.setRouterInfo((RouterInfo)_state.getData());
if (_state.getData().getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
if (responseTime > MAX_DIRECT_EXPIRATION)
responseTime = MAX_DIRECT_EXPIRATION;
} else if (_state.getData() instanceof LeaseSet)
msg.setLeaseSet((LeaseSet)_state.getData());
else
} else if (_state.getData().getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
} else {
throw new IllegalArgumentException("Storing an unknown data type! " + _state.getData());
}
msg.setEntry(_state.getData());
msg.setMessageExpiration(getContext().clock().now() + _timeoutMs);
if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) {
@ -286,7 +289,7 @@ class StoreJob extends JobImpl {
*
*/
private void sendStore(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
if (msg.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
getContext().statManager().addRateData("netDb.storeLeaseSetSent", 1, 0);
// if it is an encrypted leaseset...
if (getContext().keyRing().get(msg.getKey()) != null)

View File

@ -9,7 +9,7 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.DataStructure;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.router.RouterContext;
@ -19,7 +19,7 @@ import net.i2p.router.RouterContext;
class StoreState {
private RouterContext _context;
private Hash _key;
private DataStructure _data;
private DatabaseEntry _data;
private final HashSet<Hash> _pendingPeers;
private Map<Hash, Long> _pendingPeerTimes;
private Map<Hash, MessageWrapper.WrappedMessage> _pendingMessages;
@ -31,10 +31,10 @@ class StoreState {
private volatile long _completed;
private volatile long _started;
public StoreState(RouterContext ctx, Hash key, DataStructure data) {
public StoreState(RouterContext ctx, Hash key, DatabaseEntry data) {
this(ctx, key, data, null);
}
public StoreState(RouterContext ctx, Hash key, DataStructure data, Set<Hash> toSkip) {
public StoreState(RouterContext ctx, Hash key, DatabaseEntry data, Set<Hash> toSkip) {
_context = ctx;
_key = key;
_data = data;
@ -54,7 +54,7 @@ class StoreState {
}
public Hash getTarget() { return _key; }
public DataStructure getData() { return _data; }
public DatabaseEntry getData() { return _data; }
public Set<Hash> getPending() {
synchronized (_pendingPeers) {
return (Set<Hash>)_pendingPeers.clone();

View File

@ -8,14 +8,15 @@ package net.i2p.router.networkdb.kademlia;
*
*/
import java.util.Collection;
import java.util.Date;
import java.util.concurrent.ConcurrentHashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataHelper;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -24,7 +25,7 @@ import net.i2p.util.Log;
class TransientDataStore implements DataStore {
private Log _log;
private ConcurrentHashMap<Hash, DataStructure> _data;
private ConcurrentHashMap<Hash, DatabaseEntry> _data;
protected RouterContext _context;
public TransientDataStore(RouterContext ctx) {
@ -51,12 +52,28 @@ class TransientDataStore implements DataStore {
return new HashSet(_data.keySet());
}
/**
* @return not a copy
* @since 0.8.3
*/
public Collection<DatabaseEntry> getEntries() {
return _data.values();
}
/**
* @return not a copy
* @since 0.8.3
*/
public Set<Map.Entry<Hash, DatabaseEntry>> getMapEntries() {
return _data.entrySet();
}
/** for PersistentDataStore only - don't use here @throws IAE always */
public DataStructure get(Hash key, boolean persist) {
public DatabaseEntry get(Hash key, boolean persist) {
throw new IllegalArgumentException("no");
}
public DataStructure get(Hash key) {
public DatabaseEntry get(Hash key) {
return _data.get(key);
}
@ -66,15 +83,15 @@ class TransientDataStore implements DataStore {
public int countLeaseSets() {
int count = 0;
for (DataStructure d : _data.values()) {
if (d instanceof LeaseSet)
for (DatabaseEntry d : _data.values()) {
if (d.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
count++;
}
return count;
}
/** for PersistentDataStore only - don't use here @throws IAE always */
public boolean put(Hash key, DataStructure data, boolean persist) {
public boolean put(Hash key, DatabaseEntry data, boolean persist) {
throw new IllegalArgumentException("no");
}
@ -82,14 +99,14 @@ class TransientDataStore implements DataStore {
* @param data must be validated before here
* @return success
*/
public boolean put(Hash key, DataStructure data) {
public boolean put(Hash key, DatabaseEntry data) {
if (data == null) return false;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Storing key " + key);
DataStructure old = null;
DatabaseEntry old = null;
old = _data.putIfAbsent(key, data);
boolean rv = false;
if (data instanceof RouterInfo) {
if (data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
// Don't do this here so we don't reset it at router startup;
// the StoreMessageJob calls this
//_context.profileManager().heardAbout(key);
@ -113,7 +130,7 @@ class TransientDataStore implements DataStore {
_log.info("New router for " + key + ": published on " + new Date(ri.getPublished()));
rv = true;
}
} else if (data instanceof LeaseSet) {
} else if (data.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
LeaseSet ls = (LeaseSet)data;
if (old != null) {
LeaseSet ols = (LeaseSet)old;
@ -158,9 +175,9 @@ class TransientDataStore implements DataStore {
public String toString() {
StringBuilder buf = new StringBuilder();
buf.append("Transient DataStore: ").append(_data.size()).append("\nKeys: ");
for (Map.Entry<Hash, DataStructure> e : _data.entrySet()) {
for (Map.Entry<Hash, DatabaseEntry> e : _data.entrySet()) {
Hash key = e.getKey();
DataStructure dp = e.getValue();
DatabaseEntry dp = e.getValue();
buf.append("\n\t*Key: ").append(key.toString()).append("\n\tContent: ").append(dp.toString());
}
buf.append("\n");
@ -168,11 +185,11 @@ class TransientDataStore implements DataStore {
}
/** for PersistentDataStore only - don't use here */
public DataStructure remove(Hash key, boolean persist) {
public DatabaseEntry remove(Hash key, boolean persist) {
throw new IllegalArgumentException("no");
}
public DataStructure remove(Hash key) {
public DatabaseEntry remove(Hash key) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing key " + key.toBase64());
return _data.remove(key);
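TransientDataStore now exposes its contents directly as DatabaseEntry values through the new getEntries() and getMapEntries() accessors (both documented above as live views, not copies). A hedged sketch of how a caller might tally a store by type, mirroring the countLeaseSets() loop; the helper class is assumed for illustration, not part of the commit:

import java.util.Collection;
import net.i2p.data.DatabaseEntry;

class DataStoreTally {
    /** Count RouterInfos and LeaseSets in one pass over a store's entries. */
    static int[] countByType(Collection<DatabaseEntry> entries) {
        int routerInfos = 0;
        int leaseSets = 0;
        for (DatabaseEntry e : entries) {
            if (e.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
                routerInfos++;
            else if (e.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
                leaseSets++;
        }
        return new int[] { routerInfos, leaseSets };
    }
}

Because getEntries() is not a copy and the backing map is a ConcurrentHashMap, iteration over it is weakly consistent rather than fail-fast, so callers can walk it while the router is live without extra locking.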

View File

@ -178,8 +178,7 @@ public class PeerTestJob extends JobImpl {
*/
private DatabaseStoreMessage buildMessage(RouterInfo peer, TunnelId replyTunnel, Hash replyGateway, long nonce, long expiration) {
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(peer.getIdentity().getHash());
msg.setRouterInfo(peer);
msg.setEntry(peer);
msg.setReplyGateway(replyGateway);
msg.setReplyTunnel(replyTunnel);
msg.setReplyToken(nonce);
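The PeerTestJob change is the same send-side migration: the stored RouterInfo now rides in setEntry() and the key is implied by the entry, so the explicit setKey() call disappears. A sketch of the full builder under that assumption; the expiration line is assumed for illustration and is not shown in the hunk:

import net.i2p.I2PAppContext;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseStoreMessage;

class PeerTestStoreSketch {
    /** Store a peer's RouterInfo and ask for a reply through the given tunnel. */
    static DatabaseStoreMessage build(I2PAppContext ctx, RouterInfo peer,
                                      TunnelId replyTunnel, Hash replyGateway,
                                      long nonce, long expiration) {
        DatabaseStoreMessage msg = new DatabaseStoreMessage(ctx);
        msg.setEntry(peer);                    // a RouterInfo is a DatabaseEntry
        msg.setReplyGateway(replyGateway);
        msg.setReplyTunnel(replyTunnel);
        msg.setReplyToken(nonce);
        msg.setMessageExpiration(expiration);  // assumed; not shown in the hunk above
        return msg;
    }
}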

View File

@ -344,8 +344,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
OutNetMessage infoMsg = new OutNetMessage(_context);
infoMsg.setExpiration(_context.clock().now()+10*1000);
DatabaseStoreMessage dsm = new DatabaseStoreMessage(_context);
dsm.setKey(_context.routerHash());
dsm.setRouterInfo(_context.router().getRouterInfo());
dsm.setEntry(_context.router().getRouterInfo());
infoMsg.setMessage(dsm);
infoMsg.setPriority(100);
RouterInfo target = _context.netDb().lookupRouterInfoLocally(_remotePeer.calculateHash());

View File

@ -577,8 +577,7 @@ class EstablishmentManager {
(isInbound ? " inbound con from " + peer : "outbound con to " + peer));
DatabaseStoreMessage m = new DatabaseStoreMessage(_context);
m.setKey(_context.routerHash());
m.setRouterInfo(_context.router().getRouterInfo());
m.setEntry(_context.router().getRouterInfo());
m.setMessageExpiration(_context.clock().now() + 10*1000);
_transport.send(m, peer);
}
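NTCPConnection and EstablishmentManager publish the local RouterInfo the same way: one setEntry() call, no setKey(). A minimal sketch of that self-publish pattern, with the context accessors taken from the hunks and the ten-second expiration kept as a parameter; illustrative only, not the code in either transport:

import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.router.RouterContext;

class SelfPublishSketch {
    /** Build a store message carrying our own RouterInfo. */
    static DatabaseStoreMessage ourInfoMessage(RouterContext ctx, long timeoutMs) {
        DatabaseStoreMessage m = new DatabaseStoreMessage(ctx);
        m.setEntry(ctx.router().getRouterInfo());         // the local RouterInfo
        m.setMessageExpiration(ctx.clock().now() + timeoutMs);
        return m;
    }
}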

View File

@ -18,6 +18,7 @@ import java.util.TreeSet;
import java.util.Vector;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.RouterAddress;
@ -769,8 +770,11 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
public void messageReceived(I2NPMessage inMsg, RouterIdentity remoteIdent, Hash remoteIdentHash, long msToReceive, int bytesReceived) {
if (inMsg.getType() == DatabaseStoreMessage.MESSAGE_TYPE) {
DatabaseStoreMessage dsm = (DatabaseStoreMessage)inMsg;
if ( (dsm.getRouterInfo() != null) &&
(dsm.getRouterInfo().getNetworkId() != Router.NETWORK_ID) ) {
DatabaseEntry entry = dsm.getEntry();
if (entry == null)
return;
if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO &&
((RouterInfo) entry).getNetworkId() != Router.NETWORK_ID) {
// this is pre-0.6.1.10, so it isn't going to happen any more
/*
@ -788,7 +792,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
+ " because they are in the wrong net");
}
*/
Hash peerHash = dsm.getRouterInfo().getIdentity().calculateHash();
Hash peerHash = entry.getHash();
PeerState peer = getPeerState(peerHash);
if (peer != null) {
RemoteHostId remote = peer.getRemoteHostId();
@ -797,14 +801,14 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
SimpleScheduler.getInstance().addEvent(new RemoveDropList(remote), DROPLIST_PERIOD);
}
markUnreachable(peerHash);
_context.shitlist().shitlistRouter(peerHash, "Part of the wrong network, version = " + dsm.getRouterInfo().getOption("router.version"));
_context.shitlist().shitlistRouter(peerHash, "Part of the wrong network, version = " + ((RouterInfo) entry).getOption("router.version"));
//_context.shitlist().shitlistRouter(peerHash, "Part of the wrong network", STYLE);
dropPeer(peerHash, false, "wrong network");
if (_log.shouldLog(Log.WARN))
_log.warn("Dropping the peer " + peerHash.toBase64() + " because they are in the wrong net: " + dsm.getRouterInfo());
_log.warn("Dropping the peer " + peerHash.toBase64() + " because they are in the wrong net: " + entry);
return;
} else {
if (dsm.getRouterInfo() != null) {
if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
if (_log.shouldLog(Log.INFO))
_log.info("Received an RI from the same net");
} else {
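The UDPTransport hunk shows the receive side of the migration: null-check the entry, branch on getType(), and take the peer hash from the entry itself instead of digging through getRouterInfo(). A hedged sketch of that pattern; the handleRouterInfo/handleLeaseSet names are placeholders, not I2P APIs:

import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseStoreMessage;

class StoreReceiveSketch {
    void receive(DatabaseStoreMessage dsm) {
        DatabaseEntry entry = dsm.getEntry();
        if (entry == null)
            return;                      // nothing usable in the message
        Hash key = entry.getHash();      // replaces getRouterInfo().getIdentity().calculateHash()
        if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
            handleRouterInfo(key, (RouterInfo) entry);
        else if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET)
            handleLeaseSet(key, (LeaseSet) entry);
    }

    // Placeholders so the sketch is self-contained; real handling lives in the transports.
    void handleRouterInfo(Hash key, RouterInfo ri) {}
    void handleLeaseSet(Hash key, LeaseSet ls) {}
}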

View File

@ -1,8 +1,10 @@
package net.i2p.router.tunnel;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.Payload;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DataMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
@ -71,7 +73,7 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
msg = newMsg;
} else if ( (_client != null) &&
(msg.getType() == DatabaseStoreMessage.MESSAGE_TYPE) &&
(((DatabaseStoreMessage)msg).getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO)) {
(((DatabaseStoreMessage)msg).getEntry().getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
// FVSJ may result in an unsolicited RI store if the peer went non-ff.
// Maybe we can figure out a way to handle this safely, so we don't ask him again.
// For now, just hope we eventually find out through other means.
@ -165,7 +167,7 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
// unnecessarily
DatabaseStoreMessage dsm = (DatabaseStoreMessage)data;
try {
if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
// If it was stored to us before, don't undo the
// receivedAsPublished flag so we will continue to respond to requests
// for the leaseset. That is, we don't want this to change the
@ -173,10 +175,11 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
// When the keyspace rotates at midnight, and this leaseset moves out
// of our keyspace, maybe we shouldn't do this?
// Should we do this whether ff or not?
LeaseSet old = _context.netDb().store(dsm.getKey(), dsm.getLeaseSet());
LeaseSet ls = (LeaseSet) dsm.getEntry();
LeaseSet old = _context.netDb().store(dsm.getKey(), ls);
if (old != null && old.getReceivedAsPublished()
/** && ((FloodfillNetworkDatabaseFacade)_context.netDb()).floodfillEnabled() **/ )
dsm.getLeaseSet().setReceivedAsPublished(true);
ls.setReceivedAsPublished(true);
if (_log.shouldLog(Log.INFO))
_log.info("Storing LS for: " + dsm.getKey() + " sent to: " + _client);
} else {
@ -189,7 +192,7 @@ public class InboundMessageDistributor implements GarlicMessageReceiver.CloveRec
_log.error("Dropped dangerous message down a tunnel for " + _client.toBase64() + ": " + dsm, new Exception("cause"));
return;
}
_context.netDb().store(dsm.getKey(), dsm.getRouterInfo());
_context.netDb().store(dsm.getKey(), (RouterInfo) dsm.getEntry());
}
} catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.WARN))