big ol' update to strip out the singletons, replacing them with

a rooted app context.  The core itself has its own I2PAppContext
(see its javadoc for, uh, docs), and the router extends that to
expose the router's singletons.  The main point of this is to
make it so that we can run multiple routers in the same JVM, even
to allow different apps in the same JVM to switch singleton
implementations (e.g. run some routers with one set of profile
calculators, and other routers with a different one).
There is still some work to be done regarding the actual boot up
of multiple routers in a JVM, as well as their configuration,
though the plan is to have the RouterContext override the
I2PAppContext's getProperty/getPropertyNames methods to read from
a config file (separate ones per context) instead of using the
System.getProperty that the base I2PAppContext uses.
Once the multi-router is working, I'll shim in a VMCommSystem
that doesn't depend upon sockets or threads to read/write (and
that uses configurable message send delays / disconnects / etc,
perhaps using data from the routerContext.getProperty to drive it).
I could hold off until the sim is all working, but there's a
truckload of changes in here and I hate dealing with conflicts ;)
Everything works - I've been running 'er for a while and kicked
the tires a bit, but if you see something amiss, please let me
know.
This commit is contained in:
jrandom
2004-04-24 11:54:35 +00:00
committed by zzz
parent c29a6b95ae
commit 393b1d7674
217 changed files with 16662 additions and 15452 deletions

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -15,6 +15,7 @@ import java.io.InputStream;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* Defines a message containing arbitrary bytes of data
@ -26,8 +27,9 @@ public class DataMessage extends I2NPMessageImpl {
public final static int MESSAGE_TYPE = 20;
private byte _data[];
public DataMessage() {
_data = null;
public DataMessage(I2PAppContext context) {
super(context);
_data = null;
}
public byte[] getData() { return _data; }
@ -36,23 +38,23 @@ public class DataMessage extends I2NPMessageImpl {
public int getSize() { return _data.length; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
int size = (int)DataHelper.readLong(in, 4);
_data = new byte[size];
int read = read(in, _data);
if (read != size)
throw new DataFormatException("Not enough bytes to read (read = " + read + ", expected = " + size + ")");
int size = (int)DataHelper.readLong(in, 4);
_data = new byte[size];
int read = read(in, _data);
if (read != size)
throw new DataFormatException("Not enough bytes to read (read = " + read + ", expected = " + size + ")");
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
ByteArrayOutputStream os = new ByteArrayOutputStream((_data != null ? _data.length + 4 : 4));
ByteArrayOutputStream os = new ByteArrayOutputStream((_data != null ? _data.length + 4 : 4));
try {
DataHelper.writeLong(os, 4, (_data != null ? _data.length : 0));
os.write(_data);
DataHelper.writeLong(os, 4, (_data != null ? _data.length : 0));
os.write(_data);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
@ -62,7 +64,7 @@ public class DataMessage extends I2NPMessageImpl {
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return DataHelper.hashCode(getData());
return DataHelper.hashCode(getData());
}
public boolean equals(Object object) {
@ -74,7 +76,7 @@ public class DataMessage extends I2NPMessageImpl {
}
}
public String toString() {
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[DataMessage: ");
buf.append("\n\tData: ").append(DataHelper.toString(getData(), 64));

View File

@ -1,99 +0,0 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.Log;
/**
* Defines the message a router sends to another router to help integrate into
* the network by searching for routers in a particular keyspace.
*
* @author jrandom
*/
public class DatabaseFindNearestMessage extends I2NPMessageImpl {
// NOTE(review): _log is never referenced anywhere in this class — presumably dead; confirm before removing
private final static Log _log = new Log(DatabaseFindNearestMessage.class);
/** I2NP message type code identifying a DatabaseFindNearestMessage on the wire */
public final static int MESSAGE_TYPE = 4;
// key in the netDb keyspace being searched for
private Hash _key;
// SHA256 of the RouterIdentity of the router that sent the query
private Hash _from;
/** Creates an empty message; the search key and from-hash must be set before writing. */
public DatabaseFindNearestMessage() {
setSearchKey(null);
setFromHash(null);
}
/**
* Defines the key being searched for
*/
public Hash getSearchKey() { return _key; }
public void setSearchKey(Hash key) { _key = key; }
/**
* Contains the SHA256 Hash of the RouterIdentity sending the message
*/
public Hash getFromHash() { return _from; }
public void setFromHash(Hash from) { _from = from; }
/**
* Reads the payload: the search key Hash followed by the sender's Hash,
* in that fixed order. Wraps any DataFormatException in an I2NPMessageException.
*
* @param in stream positioned at the start of this message's payload
* @param type I2NP message type already parsed from the header; must equal MESSAGE_TYPE
* @throws I2NPMessageException if the type is wrong or the payload is malformed
*/
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_key = new Hash();
_key.readBytes(in);
_from = new Hash();
_from.readBytes(in);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
}
/**
* Serializes the payload (search key then from-hash) to a byte array.
*
* @return the serialized payload bytes
* @throws I2NPMessageException if either field is unset or fails to serialize
*/
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if ( (_key == null) || (_from == null) ) throw new I2NPMessageException("Not enough data to write out");
// 32 is only the initial buffer capacity; the stream grows as needed
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
_key.writeBytes(os);
_from.writeBytes(os);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
return os.toByteArray();
}
public int getType() { return MESSAGE_TYPE; }
// hash derived from both payload fields, consistent with equals() below
public int hashCode() {
return DataHelper.hashCode(getSearchKey()) +
DataHelper.hashCode(getFromHash());
}
// equal iff the other object is a DatabaseFindNearestMessage with the
// same search key and from-hash (null-safe via DataHelper.eq)
public boolean equals(Object object) {
if ( (object != null) && (object instanceof DatabaseFindNearestMessage) ) {
DatabaseFindNearestMessage msg = (DatabaseFindNearestMessage)object;
return DataHelper.eq(getSearchKey(),msg.getSearchKey()) &&
DataHelper.eq(getFromHash(),msg.getFromHash());
} else {
return false;
}
}
/** Human-readable dump of the message fields, for logging/debugging. */
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[DatabaseFindNearestMessage: ");
buf.append("\n\tSearch Key: ").append(getSearchKey());
buf.append("\n\tFrom: ").append(getFromHash());
buf.append("]");
return buf.toString();
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -21,6 +21,7 @@ import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.TunnelId;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* Defines the message a router sends to another router to search for a
@ -36,10 +37,11 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
private TunnelId _replyTunnel;
private Set _dontIncludePeers;
public DatabaseLookupMessage() {
setSearchKey(null);
setFrom(null);
setDontIncludePeers(null);
public DatabaseLookupMessage(I2PAppContext context) {
super(context);
setSearchKey(null);
setFrom(null);
setDontIncludePeers(null);
}
/**
@ -68,63 +70,63 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
* @return Set of Hash objects, each of which is the H(routerIdentity) to skip
*/
public Set getDontIncludePeers() { return _dontIncludePeers; }
public void setDontIncludePeers(Set peers) {
if (peers != null)
_dontIncludePeers = new HashSet(peers);
else
_dontIncludePeers = null;
public void setDontIncludePeers(Set peers) {
if (peers != null)
_dontIncludePeers = new HashSet(peers);
else
_dontIncludePeers = null;
}
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_key = new Hash();
_key.readBytes(in);
_from = new RouterInfo();
_from.readBytes(in);
boolean tunnelSpecified = DataHelper.readBoolean(in).booleanValue();
if (tunnelSpecified) {
_replyTunnel = new TunnelId();
_replyTunnel.readBytes(in);
}
int numPeers = (int)DataHelper.readLong(in, 2);
if ( (numPeers < 0) || (numPeers >= (1<<16) ) )
throw new DataFormatException("Invalid number of peers - " + numPeers);
Set peers = new HashSet(numPeers);
for (int i = 0; i < numPeers; i++) {
Hash peer = new Hash();
peer.readBytes(in);
peers.add(peer);
}
_dontIncludePeers = peers;
_key = new Hash();
_key.readBytes(in);
_from = new RouterInfo();
_from.readBytes(in);
boolean tunnelSpecified = DataHelper.readBoolean(in).booleanValue();
if (tunnelSpecified) {
_replyTunnel = new TunnelId();
_replyTunnel.readBytes(in);
}
int numPeers = (int)DataHelper.readLong(in, 2);
if ( (numPeers < 0) || (numPeers >= (1<<16) ) )
throw new DataFormatException("Invalid number of peers - " + numPeers);
Set peers = new HashSet(numPeers);
for (int i = 0; i < numPeers; i++) {
Hash peer = new Hash();
peer.readBytes(in);
peers.add(peer);
}
_dontIncludePeers = peers;
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if (_key == null) throw new I2NPMessageException("Key being searched for not specified");
if (_from == null) throw new I2NPMessageException("From address not specified");
if (_key == null) throw new I2NPMessageException("Key being searched for not specified");
if (_from == null) throw new I2NPMessageException("From address not specified");
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
_key.writeBytes(os);
_from.writeBytes(os);
if (_replyTunnel != null) {
DataHelper.writeBoolean(os, Boolean.TRUE);
_replyTunnel.writeBytes(os);
} else {
DataHelper.writeBoolean(os, Boolean.FALSE);
}
if ( (_dontIncludePeers == null) || (_dontIncludePeers.size() <= 0) ) {
DataHelper.writeLong(os, 2, 0);
} else {
DataHelper.writeLong(os, 2, _dontIncludePeers.size());
for (Iterator iter = _dontIncludePeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
peer.writeBytes(os);
}
}
_key.writeBytes(os);
_from.writeBytes(os);
if (_replyTunnel != null) {
DataHelper.writeBoolean(os, Boolean.TRUE);
_replyTunnel.writeBytes(os);
} else {
DataHelper.writeBoolean(os, Boolean.FALSE);
}
if ( (_dontIncludePeers == null) || (_dontIncludePeers.size() <= 0) ) {
DataHelper.writeLong(os, 2, 0);
} else {
DataHelper.writeLong(os, 2, _dontIncludePeers.size());
for (Iterator iter = _dontIncludePeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
peer.writeBytes(os);
}
}
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
@ -134,25 +136,25 @@ public class DatabaseLookupMessage extends I2NPMessageImpl {
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return DataHelper.hashCode(getSearchKey()) +
DataHelper.hashCode(getFrom()) +
DataHelper.hashCode(getReplyTunnel()) +
DataHelper.hashCode(_dontIncludePeers);
return DataHelper.hashCode(getSearchKey()) +
DataHelper.hashCode(getFrom()) +
DataHelper.hashCode(getReplyTunnel()) +
DataHelper.hashCode(_dontIncludePeers);
}
public boolean equals(Object object) {
if ( (object != null) && (object instanceof DatabaseLookupMessage) ) {
DatabaseLookupMessage msg = (DatabaseLookupMessage)object;
return DataHelper.eq(getSearchKey(),msg.getSearchKey()) &&
DataHelper.eq(getFrom(),msg.getFrom()) &&
DataHelper.eq(getReplyTunnel(),msg.getReplyTunnel()) &&
DataHelper.eq(_dontIncludePeers,msg.getDontIncludePeers());
DataHelper.eq(getFrom(),msg.getFrom()) &&
DataHelper.eq(getReplyTunnel(),msg.getReplyTunnel()) &&
DataHelper.eq(_dontIncludePeers,msg.getDontIncludePeers());
} else {
return false;
}
}
public String toString() {
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[DatabaseLookupMessage: ");
buf.append("\n\tSearch Key: ").append(getSearchKey());

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -21,10 +21,11 @@ import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* Defines the message a router sends to another router in response to a
* search (DatabaseFindNearest or DatabaseLookup) when it doesn't have the value,
* Defines the message a router sends to another router in response to a
* search (DatabaseFindNearest or DatabaseLookup) when it doesn't have the value,
* specifying what routers it would search.
*
* @author jrandom
@ -36,10 +37,11 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
private List _routerInfoStructures;
private Hash _from;
public DatabaseSearchReplyMessage() {
setSearchKey(null);
_routerInfoStructures = new ArrayList();
setFromHash(null);
public DatabaseSearchReplyMessage(I2PAppContext context) {
super(context);
setSearchKey(null);
_routerInfoStructures = new ArrayList();
setFromHash(null);
}
/**
@ -57,58 +59,58 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
public void setFromHash(Hash from) { _from = from; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_key = new Hash();
_key.readBytes(in);
int compressedLength = (int)DataHelper.readLong(in, 2);
byte compressedData[] = new byte[compressedLength];
int read = DataHelper.read(in, compressedData);
if (read != compressedLength)
throw new IOException("Not enough data to decompress");
byte decompressedData[] = DataHelper.decompress(compressedData);
ByteArrayInputStream bais = new ByteArrayInputStream(decompressedData);
int num = (int)DataHelper.readLong(bais, 1);
_routerInfoStructures.clear();
for (int i = 0; i < num; i++) {
RouterInfo info = new RouterInfo();
info.readBytes(bais);
addReply(info);
}
_from = new Hash();
_from.readBytes(in);
_key = new Hash();
_key.readBytes(in);
int compressedLength = (int)DataHelper.readLong(in, 2);
byte compressedData[] = new byte[compressedLength];
int read = DataHelper.read(in, compressedData);
if (read != compressedLength)
throw new IOException("Not enough data to decompress");
byte decompressedData[] = DataHelper.decompress(compressedData);
ByteArrayInputStream bais = new ByteArrayInputStream(decompressedData);
int num = (int)DataHelper.readLong(bais, 1);
_routerInfoStructures.clear();
for (int i = 0; i < num; i++) {
RouterInfo info = new RouterInfo();
info.readBytes(bais);
addReply(info);
}
_from = new Hash();
_from.readBytes(in);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if (_key == null)
throw new I2NPMessageException("Key in reply to not specified");
if (_routerInfoStructures == null)
throw new I2NPMessageException("RouterInfo replies are null");
if (_routerInfoStructures.size() <= 0)
throw new I2NPMessageException("No replies specified in SearchReply! Always include oneself!");
if (_from == null)
throw new I2NPMessageException("No 'from' address specified!");
if (_key == null)
throw new I2NPMessageException("Key in reply to not specified");
if (_routerInfoStructures == null)
throw new I2NPMessageException("RouterInfo replies are null");
if (_routerInfoStructures.size() <= 0)
throw new I2NPMessageException("No replies specified in SearchReply! Always include oneself!");
if (_from == null)
throw new I2NPMessageException("No 'from' address specified!");
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
_key.writeBytes(os);
ByteArrayOutputStream baos = new ByteArrayOutputStream(512);
DataHelper.writeLong(baos, 1, _routerInfoStructures.size());
for (int i = 0; i < getNumReplies(); i++) {
RouterInfo info = getReply(i);
info.writeBytes(baos);
}
byte compressed[] = DataHelper.compress(baos.toByteArray());
DataHelper.writeLong(os, 2, compressed.length);
os.write(compressed);
_from.writeBytes(os);
_key.writeBytes(os);
ByteArrayOutputStream baos = new ByteArrayOutputStream(512);
DataHelper.writeLong(baos, 1, _routerInfoStructures.size());
for (int i = 0; i < getNumReplies(); i++) {
RouterInfo info = getReply(i);
info.writeBytes(baos);
}
byte compressed[] = DataHelper.compress(baos.toByteArray());
DataHelper.writeLong(os, 2, compressed.length);
os.write(compressed);
_from.writeBytes(os);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
@ -121,27 +123,27 @@ public class DatabaseSearchReplyMessage extends I2NPMessageImpl {
if ( (object != null) && (object instanceof DatabaseSearchReplyMessage) ) {
DatabaseSearchReplyMessage msg = (DatabaseSearchReplyMessage)object;
return DataHelper.eq(getSearchKey(),msg.getSearchKey()) &&
DataHelper.eq(getFromHash(),msg.getFromHash()) &&
DataHelper.eq(_routerInfoStructures,msg._routerInfoStructures);
DataHelper.eq(getFromHash(),msg.getFromHash()) &&
DataHelper.eq(_routerInfoStructures,msg._routerInfoStructures);
} else {
return false;
}
}
public int hashCode() {
return DataHelper.hashCode(getSearchKey()) +
DataHelper.hashCode(getFromHash()) +
DataHelper.hashCode(_routerInfoStructures);
return DataHelper.hashCode(getSearchKey()) +
DataHelper.hashCode(getFromHash()) +
DataHelper.hashCode(_routerInfoStructures);
}
public String toString() {
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[DatabaseSearchReplyMessage: ");
buf.append("\n\tSearch Key: ").append(getSearchKey());
buf.append("\n\tReplies: # = ").append(getNumReplies());
for (int i = 0; i < getNumReplies(); i++) {
buf.append("\n\t\tReply [").append(i).append("]: ").append(getReply(i));
}
for (int i = 0; i < getNumReplies(); i++) {
buf.append("\n\t\tReply [").append(i).append("]: ").append(getReply(i));
}
buf.append("\n\tFrom: ").append(getFromHash());
buf.append("]");
return buf.toString();

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -19,9 +19,10 @@ import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* Defines the message a router sends to another router to test the network
* Defines the message a router sends to another router to test the network
* database reachability, as well as the reply message sent back.
*
* @author jrandom
@ -37,11 +38,12 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
public final static int KEY_TYPE_ROUTERINFO = 0;
public final static int KEY_TYPE_LEASESET = 1;
public DatabaseStoreMessage() {
setValueType(-1);
setKey(null);
setLeaseSet(null);
setRouterInfo(null);
public DatabaseStoreMessage(I2PAppContext context) {
super(context);
setValueType(-1);
setKey(null);
setLeaseSet(null);
setRouterInfo(null);
}
/**
@ -56,10 +58,10 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
*
*/
public RouterInfo getRouterInfo() { return _info; }
public void setRouterInfo(RouterInfo routerInfo) {
_info = routerInfo;
if (_info != null)
setValueType(KEY_TYPE_ROUTERINFO);
public void setRouterInfo(RouterInfo routerInfo) {
_info = routerInfo;
if (_info != null)
setValueType(KEY_TYPE_ROUTERINFO);
}
/**
@ -67,14 +69,14 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
*
*/
public LeaseSet getLeaseSet() { return _leaseSet; }
public void setLeaseSet(LeaseSet leaseSet) {
_leaseSet = leaseSet;
if (_leaseSet != null)
setValueType(KEY_TYPE_LEASESET);
public void setLeaseSet(LeaseSet leaseSet) {
_leaseSet = leaseSet;
if (_leaseSet != null)
setValueType(KEY_TYPE_LEASESET);
}
/**
* Defines type of key being stored in the network database -
* Defines type of key being stored in the network database -
* either KEY_TYPE_ROUTERINFO or KEY_TYPE_LEASESET
*
*/
@ -82,52 +84,52 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
public void setValueType(int type) { _type = type; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_key = new Hash();
_key.readBytes(in);
_log.debug("Hash read: " + _key.toBase64());
_type = (int)DataHelper.readLong(in, 1);
if (_type == KEY_TYPE_LEASESET) {
_leaseSet = new LeaseSet();
_leaseSet.readBytes(in);
} else if (_type == KEY_TYPE_ROUTERINFO) {
_info = new RouterInfo();
int compressedSize = (int)DataHelper.readLong(in, 2);
byte compressed[] = new byte[compressedSize];
int read = DataHelper.read(in, compressed);
if (read != compressedSize)
throw new I2NPMessageException("Invalid compressed data size");
ByteArrayInputStream bais = new ByteArrayInputStream(DataHelper.decompress(compressed));
_info.readBytes(bais);
} else {
throw new I2NPMessageException("Invalid type of key read from the structure - " + _type);
}
_key = new Hash();
_key.readBytes(in);
_log.debug("Hash read: " + _key.toBase64());
_type = (int)DataHelper.readLong(in, 1);
if (_type == KEY_TYPE_LEASESET) {
_leaseSet = new LeaseSet();
_leaseSet.readBytes(in);
} else if (_type == KEY_TYPE_ROUTERINFO) {
_info = new RouterInfo();
int compressedSize = (int)DataHelper.readLong(in, 2);
byte compressed[] = new byte[compressedSize];
int read = DataHelper.read(in, compressed);
if (read != compressedSize)
throw new I2NPMessageException("Invalid compressed data size");
ByteArrayInputStream bais = new ByteArrayInputStream(DataHelper.decompress(compressed));
_info.readBytes(bais);
} else {
throw new I2NPMessageException("Invalid type of key read from the structure - " + _type);
}
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if (_key == null) throw new I2NPMessageException("Invalid key");
if ( (_type != KEY_TYPE_LEASESET) && (_type != KEY_TYPE_ROUTERINFO) ) throw new I2NPMessageException("Invalid key type");
if ( (_type == KEY_TYPE_LEASESET) && (_leaseSet == null) ) throw new I2NPMessageException("Missing lease set");
if ( (_type == KEY_TYPE_ROUTERINFO) && (_info == null) ) throw new I2NPMessageException("Missing router info");
if (_key == null) throw new I2NPMessageException("Invalid key");
if ( (_type != KEY_TYPE_LEASESET) && (_type != KEY_TYPE_ROUTERINFO) ) throw new I2NPMessageException("Invalid key type");
if ( (_type == KEY_TYPE_LEASESET) && (_leaseSet == null) ) throw new I2NPMessageException("Missing lease set");
if ( (_type == KEY_TYPE_ROUTERINFO) && (_info == null) ) throw new I2NPMessageException("Missing router info");
ByteArrayOutputStream os = new ByteArrayOutputStream(256);
try {
_key.writeBytes(os);
DataHelper.writeLong(os, 1, _type);
if (_type == KEY_TYPE_LEASESET) {
_leaseSet.writeBytes(os);
} else if (_type == KEY_TYPE_ROUTERINFO) {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4*1024);
_info.writeBytes(baos);
byte uncompressed[] = baos.toByteArray();
byte compressed[] = DataHelper.compress(uncompressed);
DataHelper.writeLong(os, 2, compressed.length);
os.write(compressed);
}
_key.writeBytes(os);
DataHelper.writeLong(os, 1, _type);
if (_type == KEY_TYPE_LEASESET) {
_leaseSet.writeBytes(os);
} else if (_type == KEY_TYPE_ROUTERINFO) {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4*1024);
_info.writeBytes(baos);
byte uncompressed[] = baos.toByteArray();
byte compressed[] = DataHelper.compress(uncompressed);
DataHelper.writeLong(os, 2, compressed.length);
os.write(compressed);
}
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
@ -137,29 +139,29 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return DataHelper.hashCode(getKey()) +
DataHelper.hashCode(getLeaseSet()) +
DataHelper.hashCode(getRouterInfo()) +
getValueType();
return DataHelper.hashCode(getKey()) +
DataHelper.hashCode(getLeaseSet()) +
DataHelper.hashCode(getRouterInfo()) +
getValueType();
}
public boolean equals(Object object) {
if ( (object != null) && (object instanceof DatabaseStoreMessage) ) {
DatabaseStoreMessage msg = (DatabaseStoreMessage)object;
return DataHelper.eq(getKey(),msg.getKey()) &&
DataHelper.eq(getLeaseSet(),msg.getLeaseSet()) &&
DataHelper.eq(getRouterInfo(),msg.getRouterInfo()) &&
DataHelper.eq(getValueType(),msg.getValueType());
DataHelper.eq(getLeaseSet(),msg.getLeaseSet()) &&
DataHelper.eq(getRouterInfo(),msg.getRouterInfo()) &&
DataHelper.eq(getValueType(),msg.getValueType());
} else {
return false;
}
}
public String toString() {
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[DatabaseStoreMessage: ");
buf.append("\n\tExpiration: ").append(getMessageExpiration());
buf.append("\n\tUnique ID: ").append(getUniqueId());
buf.append("\n\tExpiration: ").append(getMessageExpiration());
buf.append("\n\tUnique ID: ").append(getUniqueId());
buf.append("\n\tKey: ").append(getKey());
buf.append("\n\tValue Type: ").append(getValueType());
buf.append("\n\tRouter Info: ").append(getRouterInfo());

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -16,6 +16,7 @@ import java.util.Date;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* Defines the message sent back in reply to a message when requested, containing
@ -29,9 +30,10 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
private long _id;
private Date _arrival;
public DeliveryStatusMessage() {
setMessageId(-1);
setArrival(null);
public DeliveryStatusMessage(I2PAppContext context) {
super(context);
setMessageId(-1);
setArrival(null);
}
public long getMessageId() { return _id; }
@ -41,22 +43,22 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
public void setArrival(Date arrival) { _arrival = arrival; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_id = DataHelper.readLong(in, 4);
_arrival = DataHelper.readDate(in);
_id = DataHelper.readLong(in, 4);
_arrival = DataHelper.readDate(in);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if ( (_id < 0) || (_arrival == null) ) throw new I2NPMessageException("Not enough data to write out");
if ( (_id < 0) || (_arrival == null) ) throw new I2NPMessageException("Not enough data to write out");
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
DataHelper.writeLong(os, 4, _id);
DataHelper.writeDate(os, _arrival);
DataHelper.writeLong(os, 4, _id);
DataHelper.writeDate(os, _arrival);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
@ -66,21 +68,21 @@ public class DeliveryStatusMessage extends I2NPMessageImpl {
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return (int)getMessageId() +
DataHelper.hashCode(getArrival());
return (int)getMessageId() +
DataHelper.hashCode(getArrival());
}
public boolean equals(Object object) {
if ( (object != null) && (object instanceof DeliveryStatusMessage) ) {
DeliveryStatusMessage msg = (DeliveryStatusMessage)object;
return DataHelper.eq(getMessageId(),msg.getMessageId()) &&
DataHelper.eq(getArrival(),msg.getArrival());
DataHelper.eq(getArrival(),msg.getArrival());
} else {
return false;
}
}
public String toString() {
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[DeliveryStatusMessage: ");
buf.append("\n\tMessage ID: ").append(getMessageId());

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -18,6 +18,7 @@ import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.DataStructureImpl;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Contains one deliverable message encrypted to a router along with instructions
@ -26,7 +27,8 @@ import net.i2p.util.Log;
* @author jrandom
*/
public class GarlicClove extends DataStructureImpl {
private final static Log _log = new Log(GarlicClove.class);
private Log _log;
private RouterContext _context;
private DeliveryInstructions _instructions;
private I2NPMessage _msg;
private long _cloveId;
@ -34,30 +36,34 @@ public class GarlicClove extends DataStructureImpl {
private Certificate _certificate;
private int _replyAction;
private SourceRouteBlock _sourceRouteBlock;
private I2NPMessageHandler _handler;
/** No action requested with the source route block */
public final static int ACTION_NONE = 0;
/**
* A DeliveryStatusMessage is requested with the source route block using
/**
* A DeliveryStatusMessage is requested with the source route block using
* the cloveId as the id received
*
*/
public final static int ACTION_STATUS = 1;
/**
/**
* No DeliveryStatusMessage is requested, but the source route block is
* included for message specific replies
*
*/
public final static int ACTION_MESSAGE_SPECIFIC = 2;
public GarlicClove() {
setInstructions(null);
setData(null);
setCloveId(-1);
setExpiration(null);
setCertificate(null);
setSourceRouteBlockAction(ACTION_NONE);
setSourceRouteBlock(null);
public GarlicClove(RouterContext context) {
_context = context;
_log = context.logManager().getLog(GarlicClove.class);
_handler = new I2NPMessageHandler(context);
setInstructions(null);
setData(null);
setCloveId(-1);
setExpiration(null);
setCertificate(null);
setSourceRouteBlockAction(ACTION_NONE);
setSourceRouteBlock(null);
}
public DeliveryInstructions getInstructions() { return _instructions; }
@ -76,94 +82,94 @@ public class GarlicClove extends DataStructureImpl {
public void setSourceRouteBlock(SourceRouteBlock block) { _sourceRouteBlock = block; }
public void readBytes(InputStream in) throws DataFormatException, IOException {
_instructions = new DeliveryInstructions();
_instructions.readBytes(in);
_log.debug("Read instructions: " + _instructions);
try {
_msg = new I2NPMessageHandler().readMessage(in);
} catch (I2NPMessageException ime) {
throw new DataFormatException("Unable to read the message from a garlic clove", ime);
}
_cloveId = DataHelper.readLong(in, 4);
_expiration = DataHelper.readDate(in);
_log.debug("CloveID read: " + _cloveId + " expiration read: " + _expiration);
_certificate = new Certificate();
_certificate.readBytes(in);
_log.debug("Read cert: " + _certificate);
int replyStyle = (int)DataHelper.readLong(in, 1);
setSourceRouteBlockAction(replyStyle);
if (replyStyle != ACTION_NONE) {
_sourceRouteBlock = new SourceRouteBlock();
_sourceRouteBlock.readBytes(in);
}
_instructions = new DeliveryInstructions();
_instructions.readBytes(in);
_log.debug("Read instructions: " + _instructions);
try {
_msg = _handler.readMessage(in);
} catch (I2NPMessageException ime) {
throw new DataFormatException("Unable to read the message from a garlic clove", ime);
}
_cloveId = DataHelper.readLong(in, 4);
_expiration = DataHelper.readDate(in);
_log.debug("CloveID read: " + _cloveId + " expiration read: " + _expiration);
_certificate = new Certificate();
_certificate.readBytes(in);
_log.debug("Read cert: " + _certificate);
int replyStyle = (int)DataHelper.readLong(in, 1);
setSourceRouteBlockAction(replyStyle);
if (replyStyle != ACTION_NONE) {
_sourceRouteBlock = new SourceRouteBlock();
_sourceRouteBlock.readBytes(in);
}
}
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
StringBuffer error = new StringBuffer();
if (_instructions == null)
error.append("No instructions ");
if (_msg == null)
error.append("No message ");
if (_cloveId < 0)
error.append("CloveID < 0 [").append(_cloveId).append("] ");
if (_expiration == null)
error.append("Expiration is null ");
if (_certificate == null)
error.append("Certificate is null ");
if (_replyAction < 0)
error.append("Reply action is < 0 [").append(_replyAction).append("] ");;
if (error.length() > 0)
throw new DataFormatException(error.toString());
if ( (_replyAction != 0) && (_sourceRouteBlock == null) )
throw new DataFormatException("Source route block must be specified for non-null action");
_instructions.writeBytes(out);
_log.debug("Wrote instructions: " + _instructions);
_msg.writeBytes(out);
DataHelper.writeLong(out, 4, _cloveId);
DataHelper.writeDate(out, _expiration);
_log.debug("CloveID written: " + _cloveId + " expiration written: " + _expiration);
_certificate.writeBytes(out);
_log.debug("Written cert: " + _certificate);
DataHelper.writeLong(out, 1, _replyAction);
if ( (_replyAction != 0) && (_sourceRouteBlock != null) )
_sourceRouteBlock.writeBytes(out);
StringBuffer error = new StringBuffer();
if (_instructions == null)
error.append("No instructions ");
if (_msg == null)
error.append("No message ");
if (_cloveId < 0)
error.append("CloveID < 0 [").append(_cloveId).append("] ");
if (_expiration == null)
error.append("Expiration is null ");
if (_certificate == null)
error.append("Certificate is null ");
if (_replyAction < 0)
error.append("Reply action is < 0 [").append(_replyAction).append("] ");;
if (error.length() > 0)
throw new DataFormatException(error.toString());
if ( (_replyAction != 0) && (_sourceRouteBlock == null) )
throw new DataFormatException("Source route block must be specified for non-null action");
_instructions.writeBytes(out);
_log.debug("Wrote instructions: " + _instructions);
_msg.writeBytes(out);
DataHelper.writeLong(out, 4, _cloveId);
DataHelper.writeDate(out, _expiration);
_log.debug("CloveID written: " + _cloveId + " expiration written: " + _expiration);
_certificate.writeBytes(out);
_log.debug("Written cert: " + _certificate);
DataHelper.writeLong(out, 1, _replyAction);
if ( (_replyAction != 0) && (_sourceRouteBlock != null) )
_sourceRouteBlock.writeBytes(out);
}
public boolean equals(Object obj) {
if ( (obj == null) || !(obj instanceof GarlicClove))
return false;
GarlicClove clove = (GarlicClove)obj;
return DataHelper.eq(getCertificate(), clove.getCertificate()) &&
DataHelper.eq(getCloveId(), clove.getCloveId()) &&
DataHelper.eq(getData(), clove.getData()) &&
DataHelper.eq(getExpiration(), clove.getExpiration()) &&
DataHelper.eq(getInstructions(), clove.getInstructions()) &&
DataHelper.eq(getSourceRouteBlock(), clove.getSourceRouteBlock()) &&
(getSourceRouteBlockAction() == clove.getSourceRouteBlockAction());
GarlicClove clove = (GarlicClove)obj;
return DataHelper.eq(getCertificate(), clove.getCertificate()) &&
DataHelper.eq(getCloveId(), clove.getCloveId()) &&
DataHelper.eq(getData(), clove.getData()) &&
DataHelper.eq(getExpiration(), clove.getExpiration()) &&
DataHelper.eq(getInstructions(), clove.getInstructions()) &&
DataHelper.eq(getSourceRouteBlock(), clove.getSourceRouteBlock()) &&
(getSourceRouteBlockAction() == clove.getSourceRouteBlockAction());
}
public int hashCode() {
return DataHelper.hashCode(getCertificate()) +
(int)getCloveId() +
DataHelper.hashCode(getData()) +
DataHelper.hashCode(getExpiration()) +
DataHelper.hashCode(getInstructions()) +
DataHelper.hashCode(getSourceRouteBlock()) +
getSourceRouteBlockAction();
return DataHelper.hashCode(getCertificate()) +
(int)getCloveId() +
DataHelper.hashCode(getData()) +
DataHelper.hashCode(getExpiration()) +
DataHelper.hashCode(getInstructions()) +
DataHelper.hashCode(getSourceRouteBlock()) +
getSourceRouteBlockAction();
}
public String toString() {
StringBuffer buf = new StringBuffer(128);
StringBuffer buf = new StringBuffer(128);
buf.append("[GarlicClove: ");
buf.append("\n\tInstructions: ").append(getInstructions());
buf.append("\n\tCertificate: ").append(getCertificate());
buf.append("\n\tClove ID: ").append(getCloveId());
buf.append("\n\tExpiration: ").append(getExpiration());
buf.append("\n\tSource route style: ").append(getSourceRouteBlockAction());
buf.append("\n\tSource route block: ").append(getSourceRouteBlock());
buf.append("\n\tData: ").append(getData());
buf.append("]");
return buf.toString();
buf.append("\n\tInstructions: ").append(getInstructions());
buf.append("\n\tCertificate: ").append(getCertificate());
buf.append("\n\tClove ID: ").append(getCloveId());
buf.append("\n\tExpiration: ").append(getExpiration());
buf.append("\n\tSource route style: ").append(getSourceRouteBlockAction());
buf.append("\n\tSource route block: ").append(getSourceRouteBlock());
buf.append("\n\tData: ").append(getData());
buf.append("]");
return buf.toString();
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -15,6 +15,7 @@ import java.io.InputStream;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* Defines the wrapped garlic message
@ -26,33 +27,34 @@ public class GarlicMessage extends I2NPMessageImpl {
public final static int MESSAGE_TYPE = 11;
private byte[] _data;
public GarlicMessage() {
setData(null);
public GarlicMessage(I2PAppContext context) {
super(context);
setData(null);
}
public byte[] getData() { return _data; }
public void setData(byte[] data) { _data = data; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
long len = DataHelper.readLong(in, 4);
_data = new byte[(int)len];
int read = read(in, _data);
if (read != len)
throw new I2NPMessageException("Incorrect size read");
long len = DataHelper.readLong(in, 4);
_data = new byte[(int)len];
int read = read(in, _data);
if (read != len)
throw new I2NPMessageException("Incorrect size read");
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if ( (_data == null) || (_data.length <= 0) ) throw new I2NPMessageException("Not enough data to write out");
if ( (_data == null) || (_data.length <= 0) ) throw new I2NPMessageException("Not enough data to write out");
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
DataHelper.writeLong(os, 4, _data.length);
os.write(_data);
DataHelper.writeLong(os, 4, _data.length);
os.write(_data);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
@ -62,7 +64,7 @@ public class GarlicMessage extends I2NPMessageImpl {
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return DataHelper.hashCode(getData());
return DataHelper.hashCode(getData());
}
public boolean equals(Object object) {
@ -74,7 +76,7 @@ public class GarlicMessage extends I2NPMessageImpl {
}
}
public String toString() {
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[GarlicMessage: ");
buf.append("\n\tData length: ").append(getData().length).append(" bytes");

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -16,20 +16,25 @@ import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* Handle messages from router to router
*
*/
public class I2NPMessageHandler {
private final static Log _log = new Log(I2NPMessageHandler.class);
private Log _log;
private I2PAppContext _context;
private long _lastReadBegin;
private long _lastReadEnd;
public I2NPMessageHandler() {}
public I2NPMessageHandler(I2PAppContext context) {
_context = context;
_log = context.logManager().getLog(I2NPMessageHandler.class);
}
/**
* Read an I2NPMessage from the stream and return the fully populated object.
*
*
* @throws IOException if there is an IO problem reading from the stream
* @throws I2NPMessageException if there is a problem handling the particular
* message - if it is an unknown type or has improper formatting, etc.
@ -37,10 +42,10 @@ public class I2NPMessageHandler {
public I2NPMessage readMessage(InputStream in) throws IOException, I2NPMessageException {
try {
int type = (int)DataHelper.readLong(in, 1);
_lastReadBegin = Clock.getInstance().now();
_lastReadBegin = System.currentTimeMillis();
I2NPMessage msg = createMessage(in, type);
msg.readBytes(in, type);
_lastReadEnd = Clock.getInstance().now();
_lastReadEnd = System.currentTimeMillis();
return msg;
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the message", dfe);
@ -50,31 +55,31 @@ public class I2NPMessageHandler {
public long getLastReadTime() { return _lastReadEnd - _lastReadBegin; }
/**
* Yes, this is fairly ugly, but its the only place it ever happens.
* Yes, this is fairly ugly, but its the only place it ever happens.
*
*/
private static I2NPMessage createMessage(InputStream in, int type) throws IOException, I2NPMessageException {
private I2NPMessage createMessage(InputStream in, int type) throws IOException, I2NPMessageException {
switch (type) {
case DatabaseStoreMessage.MESSAGE_TYPE:
return new DatabaseStoreMessage();
case DatabaseLookupMessage.MESSAGE_TYPE:
return new DatabaseLookupMessage();
case DatabaseSearchReplyMessage.MESSAGE_TYPE:
return new DatabaseSearchReplyMessage();
case DeliveryStatusMessage.MESSAGE_TYPE:
return new DeliveryStatusMessage();
case GarlicMessage.MESSAGE_TYPE:
return new GarlicMessage();
case TunnelMessage.MESSAGE_TYPE:
return new TunnelMessage();
case DataMessage.MESSAGE_TYPE:
return new DataMessage();
case SourceRouteReplyMessage.MESSAGE_TYPE:
return new SourceRouteReplyMessage();
case TunnelCreateMessage.MESSAGE_TYPE:
return new TunnelCreateMessage();
case TunnelCreateStatusMessage.MESSAGE_TYPE:
return new TunnelCreateStatusMessage();
case DatabaseStoreMessage.MESSAGE_TYPE:
return new DatabaseStoreMessage(_context);
case DatabaseLookupMessage.MESSAGE_TYPE:
return new DatabaseLookupMessage(_context);
case DatabaseSearchReplyMessage.MESSAGE_TYPE:
return new DatabaseSearchReplyMessage(_context);
case DeliveryStatusMessage.MESSAGE_TYPE:
return new DeliveryStatusMessage(_context);
case GarlicMessage.MESSAGE_TYPE:
return new GarlicMessage(_context);
case TunnelMessage.MESSAGE_TYPE:
return new TunnelMessage(_context);
case DataMessage.MESSAGE_TYPE:
return new DataMessage(_context);
case SourceRouteReplyMessage.MESSAGE_TYPE:
return new SourceRouteReplyMessage(_context);
case TunnelCreateMessage.MESSAGE_TYPE:
return new TunnelCreateMessage(_context);
case TunnelCreateStatusMessage.MESSAGE_TYPE:
return new TunnelCreateStatusMessage(_context);
default:
throw new I2NPMessageException("The type "+ type + " is an unknown I2NP message");
}
@ -82,7 +87,7 @@ public class I2NPMessageHandler {
public static void main(String args[]) {
try {
I2NPMessage msg = new I2NPMessageHandler().readMessage(new FileInputStream(args[0]));
I2NPMessage msg = new I2NPMessageHandler(I2PAppContext.getGlobalContext()).readMessage(new FileInputStream(args[0]));
System.out.println(msg);
} catch (Exception e) {
e.printStackTrace();

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -19,6 +19,7 @@ import net.i2p.data.DataStructureImpl;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.I2PAppContext;
/**
* Defines the base message implementation.
@ -26,15 +27,18 @@ import net.i2p.util.RandomSource;
* @author jrandom
*/
public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPMessage {
private final static Log _log = new Log(I2NPMessageImpl.class);
private Log _log;
protected I2PAppContext _context;
private Date _expiration;
private long _uniqueId;
public final static long DEFAULT_EXPIRATION_MS = 1*60*1000; // 1 minute by default
public I2NPMessageImpl() {
_expiration = new Date(Clock.getInstance().now() + DEFAULT_EXPIRATION_MS);
_uniqueId = RandomSource.getInstance().nextInt(Integer.MAX_VALUE);
public I2NPMessageImpl(I2PAppContext context) {
_context = context;
_log = context.logManager().getLog(I2NPMessageImpl.class);
_expiration = new Date(_context.clock().now() + DEFAULT_EXPIRATION_MS);
_uniqueId = _context.random().nextInt(Integer.MAX_VALUE);
}
/**
@ -45,8 +49,8 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
protected abstract byte[] writeMessage() throws I2NPMessageException, IOException;
/**
* Read the body into the data structures, after the initial type byte and
* the uniqueId / expiration, using the current class's format as defined by
* Read the body into the data structures, after the initial type byte and
* the uniqueId / expiration, using the current class's format as defined by
* the I2NP specification
*
* @param in stream to read from
@ -58,35 +62,35 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
protected abstract void readMessage(InputStream in, int type) throws I2NPMessageException, IOException;
public void readBytes(InputStream in) throws DataFormatException, IOException {
try {
readBytes(in, -1);
} catch (I2NPMessageException ime) {
throw new DataFormatException("Bad bytes", ime);
}
try {
readBytes(in, -1);
} catch (I2NPMessageException ime) {
throw new DataFormatException("Bad bytes", ime);
}
}
public void readBytes(InputStream in, int type) throws I2NPMessageException, IOException {
try {
if (type < 0)
type = (int)DataHelper.readLong(in, 1);
_uniqueId = DataHelper.readLong(in, 4);
_expiration = DataHelper.readDate(in);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the message header", dfe);
}
_log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
readMessage(in, type);
try {
if (type < 0)
type = (int)DataHelper.readLong(in, 1);
_uniqueId = DataHelper.readLong(in, 4);
_expiration = DataHelper.readDate(in);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the message header", dfe);
}
_log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
readMessage(in, type);
}
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
try {
DataHelper.writeLong(out, 1, getType());
DataHelper.writeLong(out, 4, _uniqueId);
DataHelper.writeDate(out, _expiration);
_log.debug("Writing bytes: type = " + getType() + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
byte[] data = writeMessage();
out.write(data);
} catch (I2NPMessageException ime) {
throw new DataFormatException("Error writing out the I2NP message data", ime);
}
try {
DataHelper.writeLong(out, 1, getType());
DataHelper.writeLong(out, 4, _uniqueId);
DataHelper.writeDate(out, _expiration);
_log.debug("Writing bytes: type = " + getType() + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
byte[] data = writeMessage();
out.write(data);
} catch (I2NPMessageException ime) {
throw new DataFormatException("Error writing out the I2NP message data", ime);
}
}
/**

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -13,9 +13,10 @@ import java.io.InputStream;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* The I2NPMessageReader reads an InputStream (using
* The I2NPMessageReader reads an InputStream (using
* {@link I2NPMessageHandler I2NPMessageHandler}) and passes out events to a registered
* listener, where events are either messages being received, exceptions being
* thrown, or the connection being closed. Routers should use this rather
@ -24,23 +25,26 @@ import net.i2p.util.Log;
* @author jrandom
*/
public class I2NPMessageReader {
private final static Log _log = new Log(I2NPMessageReader.class);
private Log _log;
private RouterContext _context;
private InputStream _stream;
private I2NPMessageEventListener _listener;
private I2NPMessageReaderRunner _reader;
private Thread _readerThread;
public I2NPMessageReader(InputStream stream, I2NPMessageEventListener lsnr) {
this(stream, lsnr, "I2NP Reader");
public I2NPMessageReader(RouterContext context, InputStream stream, I2NPMessageEventListener lsnr) {
this(context, stream, lsnr, "I2NP Reader");
}
public I2NPMessageReader(InputStream stream, I2NPMessageEventListener lsnr, String name) {
_stream = stream;
public I2NPMessageReader(RouterContext context, InputStream stream, I2NPMessageEventListener lsnr, String name) {
_context = context;
_log = context.logManager().getLog(I2NPMessageReader.class);
_stream = stream;
setListener(lsnr);
_reader = new I2NPMessageReaderRunner();
_readerThread = new I2PThread(_reader);
_readerThread.setName(name);
_readerThread.setDaemon(true);
_readerThread.setName(name);
_readerThread.setDaemon(true);
}
public void setListener(I2NPMessageEventListener lsnr) { _listener = lsnr; }
@ -50,7 +54,7 @@ public class I2NPMessageReader {
* Instruct the reader to begin reading messages off the stream
*
*/
public void startReading() { _readerThread.start(); }
public void startReading() { _readerThread.start(); }
/**
* Have the already started reader pause its reading indefinitely
*
@ -62,7 +66,7 @@ public class I2NPMessageReader {
*/
public void resumeReading() { _reader.resumeRunner(); }
/**
* Cancel reading.
* Cancel reading.
*
*/
public void stopReading() { _reader.cancelRunner(); }
@ -90,22 +94,22 @@ public class I2NPMessageReader {
*
*/
public void disconnected(I2NPMessageReader reader);
}
}
private class I2NPMessageReaderRunner implements Runnable {
private boolean _doRun;
private boolean _doRun;
private boolean _stayAlive;
private I2NPMessageHandler _handler;
private I2NPMessageHandler _handler;
public I2NPMessageReaderRunner() {
_doRun = true;
_stayAlive = true;
_handler = new I2NPMessageHandler();
_handler = new I2NPMessageHandler(_context);
}
public void pauseRunner() { _doRun = false; }
public void resumeRunner() { _doRun = true; }
public void cancelRunner() {
public void cancelRunner() {
_doRun = false;
_stayAlive = false;
_stayAlive = false;
}
public void run() {
while (_stayAlive) {
@ -114,16 +118,16 @@ public class I2NPMessageReader {
try {
I2NPMessage msg = _handler.readMessage(_stream);
if (msg != null) {
long msToRead = _handler.getLastReadTime();
long msToRead = _handler.getLastReadTime();
_listener.messageReceived(I2NPMessageReader.this, msg, msToRead);
}
}
} catch (I2NPMessageException ime) {
//_log.warn("Error handling message", ime);
//_log.warn("Error handling message", ime);
_listener.readError(I2NPMessageReader.this, ime);
_listener.disconnected(I2NPMessageReader.this);
cancelRunner();
_listener.disconnected(I2NPMessageReader.this);
cancelRunner();
} catch (IOException ioe) {
_log.warn("IO Error handling message", ioe);
_log.warn("IO Error handling message", ioe);
_listener.disconnected(I2NPMessageReader.this);
cancelRunner();
}

View File

@ -26,6 +26,7 @@ import net.i2p.data.PublicKey;
import net.i2p.data.SessionKey;
import net.i2p.data.SessionTag;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
@ -46,14 +47,14 @@ public class SourceRouteBlock extends DataStructureImpl {
private long _decryptedExpiration;
public SourceRouteBlock() {
setRouter(null);
setData(null);
setKey(null);
setTag((byte[])null);
_decryptedInstructions = null;
_decryptedMessageId = -1;
_decryptedCertificate = null;
_decryptedExpiration = -1;
setRouter(null);
setData(null);
setKey(null);
setTag((byte[])null);
_decryptedInstructions = null;
_decryptedMessageId = -1;
_decryptedCertificate = null;
_decryptedExpiration = -1;
}
/**
@ -92,9 +93,9 @@ public class SourceRouteBlock extends DataStructureImpl {
public byte[] getTag() { return _tag; }
public void setTag(SessionTag tag) { setTag(tag.getData()); }
public void setTag(byte tag[]) {
if ( (tag != null) && (tag.length != SessionTag.BYTE_LENGTH) )
throw new IllegalArgumentException("Tag must be either null or 32 bytes");
_tag = tag;
if ( (tag != null) && (tag.length != SessionTag.BYTE_LENGTH) )
throw new IllegalArgumentException("Tag must be either null or 32 bytes");
_tag = tag;
}
/**
@ -126,100 +127,105 @@ public class SourceRouteBlock extends DataStructureImpl {
*
* @throws DataFormatException if the data is invalid or could not be encrypted
*/
public void setData(DeliveryInstructions instructions, long messageId, Certificate cert, long expiration, PublicKey replyThrough) throws DataFormatException {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(64);
public void setData(I2PAppContext ctx, DeliveryInstructions instructions,
long messageId, Certificate cert, long expiration,
PublicKey replyThrough) throws DataFormatException {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(64);
_decryptedInstructions = instructions;
_decryptedMessageId = messageId;
_decryptedCertificate = cert;
_decryptedExpiration = expiration;
instructions.writeBytes(baos);
DataHelper.writeLong(baos, 4, messageId);
cert.writeBytes(baos);
DataHelper.writeDate(baos, new Date(expiration));
_decryptedInstructions = instructions;
_decryptedMessageId = messageId;
_decryptedCertificate = cert;
_decryptedExpiration = expiration;
int paddedSize = 256;
SessionKey sessKey = null;
SessionTag tag = null;
if (instructions.getDelayRequested()) {
// always use a new key if we're delaying, since the reply block may not be used within the
// window of a session
sessKey = KeyGenerator.getInstance().generateSessionKey();
tag = null;
_log.debug("Delay requested - creating a new session key");
} else {
sessKey = SessionKeyManager.getInstance().getCurrentKey(replyThrough);
if (sessKey == null) {
sessKey = KeyGenerator.getInstance().generateSessionKey();
tag = null;
_log.debug("No delay requested, but no session key is known");
} else {
tag = SessionKeyManager.getInstance().consumeNextAvailableTag(replyThrough, sessKey);
}
}
byte encData[] = ElGamalAESEngine.encrypt(baos.toByteArray(), replyThrough, sessKey, null, tag, paddedSize);
setData(encData);
} catch (IOException ioe) {
throw new DataFormatException("Error writing out the source route block data", ioe);
} catch (DataFormatException dfe) {
throw new DataFormatException("Error writing out the source route block data", dfe);
}
instructions.writeBytes(baos);
DataHelper.writeLong(baos, 4, messageId);
cert.writeBytes(baos);
DataHelper.writeDate(baos, new Date(expiration));
int paddedSize = 256;
SessionKey sessKey = null;
SessionTag tag = null;
if (instructions.getDelayRequested()) {
// always use a new key if we're delaying, since the reply block may not be used within the
// window of a session
sessKey = ctx.keyGenerator().generateSessionKey();
tag = null;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Delay requested - creating a new session key");
} else {
sessKey = ctx.sessionKeyManager().getCurrentKey(replyThrough);
if (sessKey == null) {
sessKey = ctx.keyGenerator().generateSessionKey();
tag = null;
if (_log.shouldLog(Log.DEBUG))
_log.debug("No delay requested, but no session key is known");
} else {
tag = ctx.sessionKeyManager().consumeNextAvailableTag(replyThrough, sessKey);
}
}
byte encData[] = ctx.elGamalAESEngine().encrypt(baos.toByteArray(), replyThrough,
sessKey, null, tag, paddedSize);
setData(encData);
} catch (IOException ioe) {
throw new DataFormatException("Error writing out the source route block data", ioe);
} catch (DataFormatException dfe) {
throw new DataFormatException("Error writing out the source route block data", dfe);
}
}
public void readBytes(InputStream in) throws DataFormatException, IOException {
_router = new Hash();
_router.readBytes(in);
int size = (int)DataHelper.readLong(in, 2);
_data = new byte[size];
int read = read(in, _data);
if (read != _data.length)
throw new DataFormatException("Incorrect # of bytes read for source route block: " + read);
_key = new SessionKey();
_key.readBytes(in);
_tag = new byte[32];
read = read(in, _tag);
if (read != _tag.length)
throw new DataFormatException("Incorrect # of bytes read for session tag: " + read);
_router = new Hash();
_router.readBytes(in);
int size = (int)DataHelper.readLong(in, 2);
_data = new byte[size];
int read = read(in, _data);
if (read != _data.length)
throw new DataFormatException("Incorrect # of bytes read for source route block: " + read);
_key = new SessionKey();
_key.readBytes(in);
_tag = new byte[32];
read = read(in, _tag);
if (read != _tag.length)
throw new DataFormatException("Incorrect # of bytes read for session tag: " + read);
}
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
if ( (_router == null) || (_data == null) || (_key == null) || (_tag == null) || (_tag.length != 32) )
throw new DataFormatException("Insufficient data to write");
_router.writeBytes(out);
DataHelper.writeLong(out, 2, _data.length);
out.write(_data);
_key.writeBytes(out);
out.write(_tag);
throw new DataFormatException("Insufficient data to write");
_router.writeBytes(out);
DataHelper.writeLong(out, 2, _data.length);
out.write(_data);
_key.writeBytes(out);
out.write(_tag);
}
public boolean equals(Object obj) {
if ( (obj == null) || !(obj instanceof SourceRouteBlock))
return false;
SourceRouteBlock block = (SourceRouteBlock)obj;
return DataHelper.eq(getRouter(), block.getRouter()) &&
DataHelper.eq(getData(), block.getData()) &&
DataHelper.eq(getKey(), block.getKey()) &&
DataHelper.eq(getTag(), block.getTag());
SourceRouteBlock block = (SourceRouteBlock)obj;
return DataHelper.eq(getRouter(), block.getRouter()) &&
DataHelper.eq(getData(), block.getData()) &&
DataHelper.eq(getKey(), block.getKey()) &&
DataHelper.eq(getTag(), block.getTag());
}
public int hashCode() {
return DataHelper.hashCode(getRouter()) +
DataHelper.hashCode(getData()) +
DataHelper.hashCode(getKey()) +
DataHelper.hashCode(getTag());
DataHelper.hashCode(getData()) +
DataHelper.hashCode(getKey()) +
DataHelper.hashCode(getTag());
}
public String toString() {
StringBuffer buf = new StringBuffer(128);
StringBuffer buf = new StringBuffer(128);
buf.append("[SourceRouteBlock: ");
buf.append("\n\tRouter: ").append(getRouter());
buf.append("\n\tData: ").append(DataHelper.toString(getData(), getData().length));
buf.append("\n\tTag: ").append(DataHelper.toString(getTag(), (getTag() != null ? getTag().length : 0)));
buf.append("\n\tKey: ").append(getKey());
buf.append("]");
return buf.toString();
buf.append("\n\tRouter: ").append(getRouter());
buf.append("\n\tData: ").append(DataHelper.toString(getData(), getData().length));
buf.append("\n\tTag: ").append(DataHelper.toString(getTag(), (getTag() != null ? getTag().length : 0)));
buf.append("\n\tKey: ").append(getKey());
buf.append("]");
return buf.toString();
}
}

View File

@ -19,6 +19,7 @@ import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.PrivateKey;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* Defines a message directed by a source route block to deliver a message to an
@ -35,14 +36,17 @@ public class SourceRouteReplyMessage extends I2NPMessageImpl {
private long _decryptedMessageId;
private Certificate _decryptedCertificate;
private long _decryptedExpiration;
private I2NPMessageHandler _handler;
public SourceRouteReplyMessage() {
_encryptedHeader = null;
_message = null;
_decryptedInstructions = null;
_decryptedMessageId = -1;
_decryptedCertificate = null;
_decryptedExpiration = -1;
public SourceRouteReplyMessage(I2PAppContext context) {
super(context);
_handler = new I2NPMessageHandler(context);
_encryptedHeader = null;
_message = null;
_decryptedInstructions = null;
_decryptedMessageId = -1;
_decryptedCertificate = null;
_decryptedExpiration = -1;
}
/**
@ -77,54 +81,56 @@ public class SourceRouteReplyMessage extends I2NPMessageImpl {
* @throws DataFormatException if the decryption fails or if the data is somehow malformed
*/
public void decryptHeader(PrivateKey key) throws DataFormatException {
if ( (_encryptedHeader == null) || (_encryptedHeader.length <= 0) )
throw new DataFormatException("No header to decrypt");
byte decr[] = ElGamalAESEngine.decrypt(_encryptedHeader, key);
if (decr == null)
throw new DataFormatException("Decrypted data is null");
try {
ByteArrayInputStream bais = new ByteArrayInputStream(decr);
_decryptedInstructions = new DeliveryInstructions();
_decryptedInstructions.readBytes(bais);
_decryptedMessageId = DataHelper.readLong(bais, 4);
_decryptedCertificate = new Certificate();
_decryptedCertificate.readBytes(bais);
_decryptedExpiration = DataHelper.readDate(bais).getTime();
if ( (_encryptedHeader == null) || (_encryptedHeader.length <= 0) )
throw new DataFormatException("No header to decrypt");
} catch (IOException ioe) {
throw new DataFormatException("Error reading the source route reply header", ioe);
} catch (DataFormatException dfe) {
throw new DataFormatException("Error reading the source route reply header", dfe);
}
byte decr[] = _context.elGamalAESEngine().decrypt(_encryptedHeader, key);
if (decr == null)
throw new DataFormatException("Decrypted data is null");
try {
ByteArrayInputStream bais = new ByteArrayInputStream(decr);
_decryptedInstructions = new DeliveryInstructions();
_decryptedInstructions.readBytes(bais);
_decryptedMessageId = DataHelper.readLong(bais, 4);
_decryptedCertificate = new Certificate();
_decryptedCertificate.readBytes(bais);
_decryptedExpiration = DataHelper.readDate(bais).getTime();
} catch (IOException ioe) {
throw new DataFormatException("Error reading the source route reply header", ioe);
} catch (DataFormatException dfe) {
throw new DataFormatException("Error reading the source route reply header", dfe);
}
}
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
if (type != MESSAGE_TYPE)
throw new I2NPMessageException("Message type is incorrect for this message");
try {
int headerSize = (int)DataHelper.readLong(in, 2);
_encryptedHeader = new byte[headerSize];
int read = read(in, _encryptedHeader);
if (read != headerSize)
throw new DataFormatException("Not enough bytes to read the header (read = " + read + ", required = " + headerSize + ")");
_message = new I2NPMessageHandler().readMessage(in);
int headerSize = (int)DataHelper.readLong(in, 2);
_encryptedHeader = new byte[headerSize];
int read = read(in, _encryptedHeader);
if (read != headerSize)
throw new DataFormatException("Not enough bytes to read the header (read = " + read
+ ", required = " + headerSize + ")");
_message = _handler.readMessage(in);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if ( (_encryptedHeader == null) || (_message == null) )
throw new I2NPMessageException("Not enough data to write out");
ByteArrayOutputStream os = new ByteArrayOutputStream(1024);
if ( (_encryptedHeader == null) || (_message == null) )
throw new I2NPMessageException("Not enough data to write out");
ByteArrayOutputStream os = new ByteArrayOutputStream(1024);
try {
DataHelper.writeLong(os, 2, _encryptedHeader.length);
os.write(_encryptedHeader);
_message.writeBytes(os);
DataHelper.writeLong(os, 2, _encryptedHeader.length);
os.write(_encryptedHeader);
_message.writeBytes(os);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
@ -134,15 +140,15 @@ public class SourceRouteReplyMessage extends I2NPMessageImpl {
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return DataHelper.hashCode(_encryptedHeader) +
DataHelper.hashCode(_message);
return DataHelper.hashCode(_encryptedHeader) +
DataHelper.hashCode(_message);
}
public boolean equals(Object object) {
if ( (object != null) && (object instanceof SourceRouteReplyMessage) ) {
SourceRouteReplyMessage msg = (SourceRouteReplyMessage)object;
return DataHelper.eq(_message,msg._message) &&
DataHelper.eq(_encryptedHeader,msg._encryptedHeader);
DataHelper.eq(_encryptedHeader,msg._encryptedHeader);
} else {
return false;
}

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -18,6 +18,7 @@ import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* Defines the message sent to a router to request that it participate in a
@ -52,23 +53,24 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
private final static long FLAG_DUMMY = 1 << 7;
private final static long FLAG_REORDER = 1 << 6;
public TunnelCreateMessage() {
setParticipantType(-1);
setNextRouter(null);
setTunnelId(null);
setTunnelDurationSeconds(-1);
setConfigurationKey(null);
setMaxPeakMessagesPerMin(-1);
setMaxAvgMessagesPerMin(-1);
setMaxPeakBytesPerMin(-1);
setMaxAvgBytesPerMin(-1);
setIncludeDummyTraffic(false);
setReorderMessages(false);
setVerificationPublicKey(null);
setVerificationPrivateKey(null);
setTunnelKey(null);
setCertificate(null);
setReplyBlock(null);
public TunnelCreateMessage(I2PAppContext context) {
super(context);
setParticipantType(-1);
setNextRouter(null);
setTunnelId(null);
setTunnelDurationSeconds(-1);
setConfigurationKey(null);
setMaxPeakMessagesPerMin(-1);
setMaxAvgMessagesPerMin(-1);
setMaxPeakBytesPerMin(-1);
setMaxAvgBytesPerMin(-1);
setIncludeDummyTraffic(false);
setReorderMessages(false);
setVerificationPublicKey(null);
setVerificationPrivateKey(null);
setTunnelKey(null);
setCertificate(null);
setReplyBlock(null);
}
public void setParticipantType(int type) { _participantType = type; }
@ -105,41 +107,41 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
public SourceRouteBlock getReplyBlock() { return _replyBlock; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_participantType = (int)DataHelper.readLong(in, 1);
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
_nextRouter = new Hash();
_nextRouter.readBytes(in);
}
_tunnelId = new TunnelId();
_tunnelId.readBytes(in);
_tunnelDuration = DataHelper.readLong(in, 4);
_configKey = new TunnelConfigurationSessionKey();
_configKey.readBytes(in);
_maxPeakMessagesPerMin = DataHelper.readLong(in, 4);
_maxAvgMessagesPerMin = DataHelper.readLong(in, 4);
_maxPeakBytesPerMin = DataHelper.readLong(in, 4);
_maxAvgBytesPerMin = DataHelper.readLong(in, 4);
int flags = (int)DataHelper.readLong(in, 1);
_includeDummyTraffic = flagsIncludeDummy(flags);
_reorderMessages = flagsReorder(flags);
_verificationPubKey = new TunnelSigningPublicKey();
_verificationPubKey.readBytes(in);
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
_verificationPrivKey = new TunnelSigningPrivateKey();
_verificationPrivKey.readBytes(in);
}
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
_tunnelKey = new TunnelSessionKey();
_tunnelKey.readBytes(in);
}
_certificate = new Certificate();
_certificate.readBytes(in);
_replyBlock = new SourceRouteBlock();
_replyBlock.readBytes(in);
_participantType = (int)DataHelper.readLong(in, 1);
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
_nextRouter = new Hash();
_nextRouter.readBytes(in);
}
_tunnelId = new TunnelId();
_tunnelId.readBytes(in);
_tunnelDuration = DataHelper.readLong(in, 4);
_configKey = new TunnelConfigurationSessionKey();
_configKey.readBytes(in);
_maxPeakMessagesPerMin = DataHelper.readLong(in, 4);
_maxAvgMessagesPerMin = DataHelper.readLong(in, 4);
_maxPeakBytesPerMin = DataHelper.readLong(in, 4);
_maxAvgBytesPerMin = DataHelper.readLong(in, 4);
int flags = (int)DataHelper.readLong(in, 1);
_includeDummyTraffic = flagsIncludeDummy(flags);
_reorderMessages = flagsReorder(flags);
_verificationPubKey = new TunnelSigningPublicKey();
_verificationPubKey.readBytes(in);
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
_verificationPrivKey = new TunnelSigningPrivateKey();
_verificationPrivKey.readBytes(in);
}
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
_tunnelKey = new TunnelSessionKey();
_tunnelKey.readBytes(in);
}
_certificate = new Certificate();
_certificate.readBytes(in);
_replyBlock = new SourceRouteBlock();
_replyBlock.readBytes(in);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
@ -148,146 +150,99 @@ public class TunnelCreateMessage extends I2NPMessageImpl {
protected byte[] writeMessage() throws I2NPMessageException, IOException {
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
DataHelper.writeLong(os, 1, _participantType);
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
_nextRouter.writeBytes(os);
}
_tunnelId.writeBytes(os);
DataHelper.writeLong(os, 4, _tunnelDuration);
_configKey.writeBytes(os);
DataHelper.writeLong(os, 4, _maxPeakMessagesPerMin);
DataHelper.writeLong(os, 4, _maxAvgMessagesPerMin);
DataHelper.writeLong(os, 4, _maxPeakBytesPerMin);
DataHelper.writeLong(os, 4, _maxAvgBytesPerMin);
long flags = getFlags();
DataHelper.writeLong(os, 1, flags);
_verificationPubKey.writeBytes(os);
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
_verificationPrivKey.writeBytes(os);
}
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
_tunnelKey.writeBytes(os);
}
_certificate.writeBytes(os);
_replyBlock.writeBytes(os);
DataHelper.writeLong(os, 1, _participantType);
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
_nextRouter.writeBytes(os);
}
_tunnelId.writeBytes(os);
DataHelper.writeLong(os, 4, _tunnelDuration);
_configKey.writeBytes(os);
DataHelper.writeLong(os, 4, _maxPeakMessagesPerMin);
DataHelper.writeLong(os, 4, _maxAvgMessagesPerMin);
DataHelper.writeLong(os, 4, _maxPeakBytesPerMin);
DataHelper.writeLong(os, 4, _maxAvgBytesPerMin);
long flags = getFlags();
DataHelper.writeLong(os, 1, flags);
_verificationPubKey.writeBytes(os);
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
_verificationPrivKey.writeBytes(os);
}
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
_tunnelKey.writeBytes(os);
}
_certificate.writeBytes(os);
_replyBlock.writeBytes(os);
} catch (Throwable t) {
throw new I2NPMessageException("Error writing out the message data", t);
}
/*
try {
DataHelper.writeLong(os, 1, _participantType);
if (_participantType != PARTICIPANT_TYPE_ENDPOINT) {
if (_nextRouter == null)
throw new I2NPMessageException("Next router is not defined");
_nextRouter.writeBytes(os);
}
if (_tunnelId == null)
throw new I2NPMessageException("Tunnel ID is not defined");
_tunnelId.writeBytes(os);
if (_tunnelDuration < 0)
throw new I2NPMessageException("Tunnel duration is negative");
DataHelper.writeLong(os, 4, _tunnelDuration);
if (_configKey == null)
throw new I2NPMessageException("Configuration key is not defined");
_configKey.writeBytes(os);
if ( (_maxPeakMessagesPerMin < 0) || (_maxAvgMessagesPerMin < 0) ||
(_maxAvgMessagesPerMin < 0) || (_maxAvgBytesPerMin < 0) )
throw new I2NPMessageException("Negative limits defined");
long flags = getFlags();
DataHelper.writeLong(os, 1, flags);
if (_verificationPubKey == null)
throw new I2NPMessageException("Verification public key is not defined");
_verificationPubKey.writeBytes(os);
if (_participantType == PARTICIPANT_TYPE_GATEWAY) {
if (_verificationPrivKey == null)
throw new I2NPMessageException("Verification private key is needed and not defined");
_verificationPrivKey.writeBytes(os);
}
if ( (_participantType == PARTICIPANT_TYPE_ENDPOINT) || (_participantType == PARTICIPANT_TYPE_GATEWAY) ) {
if (_tunnelKey == null)
throw new I2NPMessageException("Tunnel key is needed and not defined");
_tunnelKey.writeBytes(os);
}
if (_certificate == null)
throw new I2NPMessageException("Certificate is not defined");
_certificate.writeBytes(os);
if (_replyBlock == null)
throw new I2NPMessageException("Reply block not defined");
_replyBlock.writeBytes(os);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
*/
return os.toByteArray();
}
private boolean flagsIncludeDummy(long flags) {
return (0 != (flags & FLAG_DUMMY));
return (0 != (flags & FLAG_DUMMY));
}
private boolean flagsReorder(long flags) {
return (0 != (flags & FLAG_REORDER));
return (0 != (flags & FLAG_REORDER));
}
private long getFlags() {
long val = 0L;
if (getIncludeDummyTraffic())
val = val | FLAG_DUMMY;
if (getReorderMessages())
val = val | FLAG_REORDER;
return val;
long val = 0L;
if (getIncludeDummyTraffic())
val = val | FLAG_DUMMY;
if (getReorderMessages())
val = val | FLAG_REORDER;
return val;
}
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return (int)(DataHelper.hashCode(getCertificate()) +
DataHelper.hashCode(getConfigurationKey()) +
DataHelper.hashCode(getNextRouter()) +
DataHelper.hashCode(getReplyBlock()) +
DataHelper.hashCode(getTunnelId()) +
DataHelper.hashCode(getTunnelKey()) +
DataHelper.hashCode(getVerificationPrivateKey()) +
DataHelper.hashCode(getVerificationPublicKey()) +
(getIncludeDummyTraffic() ? 1 : 0) +
getMaxAvgBytesPerMin() +
getMaxAvgMessagesPerMin() +
getMaxPeakBytesPerMin() +
getMaxPeakMessagesPerMin() +
getParticipantType() +
(getReorderMessages() ? 1 : 0) +
getTunnelDurationSeconds());
return (int)(DataHelper.hashCode(getCertificate()) +
DataHelper.hashCode(getConfigurationKey()) +
DataHelper.hashCode(getNextRouter()) +
DataHelper.hashCode(getReplyBlock()) +
DataHelper.hashCode(getTunnelId()) +
DataHelper.hashCode(getTunnelKey()) +
DataHelper.hashCode(getVerificationPrivateKey()) +
DataHelper.hashCode(getVerificationPublicKey()) +
(getIncludeDummyTraffic() ? 1 : 0) +
getMaxAvgBytesPerMin() +
getMaxAvgMessagesPerMin() +
getMaxPeakBytesPerMin() +
getMaxPeakMessagesPerMin() +
getParticipantType() +
(getReorderMessages() ? 1 : 0) +
getTunnelDurationSeconds());
}
public boolean equals(Object object) {
if ( (object != null) && (object instanceof TunnelCreateMessage) ) {
TunnelCreateMessage msg = (TunnelCreateMessage)object;
return DataHelper.eq(getCertificate(), msg.getCertificate()) &&
DataHelper.eq(getConfigurationKey(), msg.getConfigurationKey()) &&
DataHelper.eq(getNextRouter(), msg.getNextRouter()) &&
DataHelper.eq(getReplyBlock(), msg.getReplyBlock()) &&
DataHelper.eq(getTunnelId(), msg.getTunnelId()) &&
DataHelper.eq(getTunnelKey(), msg.getTunnelKey()) &&
DataHelper.eq(getVerificationPrivateKey(), msg.getVerificationPrivateKey()) &&
DataHelper.eq(getVerificationPublicKey(), msg.getVerificationPublicKey()) &&
(getIncludeDummyTraffic() == msg.getIncludeDummyTraffic()) &&
(getMaxAvgBytesPerMin() == msg.getMaxAvgBytesPerMin()) &&
(getMaxAvgMessagesPerMin() == msg.getMaxAvgMessagesPerMin()) &&
(getMaxPeakBytesPerMin() == msg.getMaxPeakBytesPerMin()) &&
(getMaxPeakMessagesPerMin() == msg.getMaxPeakMessagesPerMin()) &&
(getParticipantType() == msg.getParticipantType()) &&
(getReorderMessages() == msg.getReorderMessages()) &&
(getTunnelDurationSeconds() == msg.getTunnelDurationSeconds());
return DataHelper.eq(getCertificate(), msg.getCertificate()) &&
DataHelper.eq(getConfigurationKey(), msg.getConfigurationKey()) &&
DataHelper.eq(getNextRouter(), msg.getNextRouter()) &&
DataHelper.eq(getReplyBlock(), msg.getReplyBlock()) &&
DataHelper.eq(getTunnelId(), msg.getTunnelId()) &&
DataHelper.eq(getTunnelKey(), msg.getTunnelKey()) &&
DataHelper.eq(getVerificationPrivateKey(), msg.getVerificationPrivateKey()) &&
DataHelper.eq(getVerificationPublicKey(), msg.getVerificationPublicKey()) &&
(getIncludeDummyTraffic() == msg.getIncludeDummyTraffic()) &&
(getMaxAvgBytesPerMin() == msg.getMaxAvgBytesPerMin()) &&
(getMaxAvgMessagesPerMin() == msg.getMaxAvgMessagesPerMin()) &&
(getMaxPeakBytesPerMin() == msg.getMaxPeakBytesPerMin()) &&
(getMaxPeakMessagesPerMin() == msg.getMaxPeakMessagesPerMin()) &&
(getParticipantType() == msg.getParticipantType()) &&
(getReorderMessages() == msg.getReorderMessages()) &&
(getTunnelDurationSeconds() == msg.getTunnelDurationSeconds());
} else {
return false;
}
}
public String toString() {
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[TunnelCreateMessage: ");
buf.append("\n\tParticipant Type: ").append(getParticipantType());

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -17,9 +17,10 @@ import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* Defines the message a router sends to another router in reply to a
* Defines the message a router sends to another router in reply to a
* TunnelCreateMessage
*
* @author jrandom
@ -37,10 +38,11 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl {
public final static int STATUS_FAILED_CERTIFICATE = 3;
public final static int STATUS_FAILED_DELETED = 100;
public TunnelCreateStatusMessage() {
setTunnelId(null);
setStatus(-1);
setFromHash(null);
public TunnelCreateStatusMessage(I2PAppContext context) {
super(context);
setTunnelId(null);
setStatus(-1);
setFromHash(null);
}
public TunnelId getTunnelId() { return _tunnelId; }
@ -56,26 +58,26 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl {
public void setFromHash(Hash from) { _from = from; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_tunnelId = new TunnelId();
_tunnelId.readBytes(in);
_status = (int)DataHelper.readLong(in, 1);
_from = new Hash();
_from.readBytes(in);
_tunnelId = new TunnelId();
_tunnelId.readBytes(in);
_status = (int)DataHelper.readLong(in, 1);
_from = new Hash();
_from.readBytes(in);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if ( (_tunnelId == null) || (_from == null) ) throw new I2NPMessageException("Not enough data to write out");
if ( (_tunnelId == null) || (_from == null) ) throw new I2NPMessageException("Not enough data to write out");
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
_tunnelId.writeBytes(os);
DataHelper.writeLong(os, 1, (_status < 0 ? 255 : _status));
_from.writeBytes(os);
_tunnelId.writeBytes(os);
DataHelper.writeLong(os, 1, (_status < 0 ? 255 : _status));
_from.writeBytes(os);
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
@ -85,23 +87,23 @@ public class TunnelCreateStatusMessage extends I2NPMessageImpl {
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return DataHelper.hashCode(getTunnelId()) +
getStatus() +
DataHelper.hashCode(getFromHash());
return DataHelper.hashCode(getTunnelId()) +
getStatus() +
DataHelper.hashCode(getFromHash());
}
public boolean equals(Object object) {
if ( (object != null) && (object instanceof TunnelCreateStatusMessage) ) {
TunnelCreateStatusMessage msg = (TunnelCreateStatusMessage)object;
return DataHelper.eq(getTunnelId(),msg.getTunnelId()) &&
DataHelper.eq(getFromHash(),msg.getFromHash()) &&
(getStatus() == msg.getStatus());
DataHelper.eq(getFromHash(),msg.getFromHash()) &&
(getStatus() == msg.getStatus());
} else {
return false;
}
}
public String toString() {
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[TunnelCreateStatusMessage: ");
buf.append("\n\tTunnel ID: ").append(getTunnelId());

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -16,6 +16,7 @@ import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.TunnelId;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* Defines the message sent between routers for tunnel delivery
@ -34,11 +35,12 @@ public class TunnelMessage extends I2NPMessageImpl {
private final static int FLAG_INCLUDESTRUCTURE = 0;
private final static int FLAG_DONT_INCLUDESTRUCTURE = 1;
public TunnelMessage() {
setTunnelId(null);
setData(null);
setVerificationStructure(null);
setEncryptedDeliveryInstructions(null);
public TunnelMessage(I2PAppContext context) {
super(context);
setTunnelId(null);
setData(null);
setVerificationStructure(null);
setEncryptedDeliveryInstructions(null);
}
public TunnelId getTunnelId() { return _tunnelId; }
@ -54,85 +56,85 @@ public class TunnelMessage extends I2NPMessageImpl {
public void setEncryptedDeliveryInstructions(byte instructions[]) { _encryptedInstructions = instructions; }
public void readMessage(InputStream in, int type) throws I2NPMessageException, IOException {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
try {
_tunnelId = new TunnelId();
_tunnelId.readBytes(in);
_log.debug("Read tunnel message for tunnel " + _tunnelId);
_size = DataHelper.readLong(in, 4);
_log.debug("Read tunnel message size: " + _size);
if (_size < 0) throw new I2NPMessageException("Invalid size in the structure: " + _size);
_data = new byte[(int)_size];
int read = read(in, _data);
if (read != _size)
throw new I2NPMessageException("Incorrect number of bytes read (" + read + ", expected " + _size);
int includeVerification = (int)DataHelper.readLong(in, 1);
if (includeVerification == FLAG_INCLUDESTRUCTURE) {
_verification = new TunnelVerificationStructure();
_verification.readBytes(in);
int len = (int)DataHelper.readLong(in, 2);
_encryptedInstructions = new byte[len];
read = read(in, _encryptedInstructions);
if (read != len)
throw new I2NPMessageException("Incorrect number of bytes read for instructions (" + read + ", expected " + len + ")");
}
_tunnelId = new TunnelId();
_tunnelId.readBytes(in);
_log.debug("Read tunnel message for tunnel " + _tunnelId);
_size = DataHelper.readLong(in, 4);
_log.debug("Read tunnel message size: " + _size);
if (_size < 0) throw new I2NPMessageException("Invalid size in the structure: " + _size);
_data = new byte[(int)_size];
int read = read(in, _data);
if (read != _size)
throw new I2NPMessageException("Incorrect number of bytes read (" + read + ", expected " + _size);
int includeVerification = (int)DataHelper.readLong(in, 1);
if (includeVerification == FLAG_INCLUDESTRUCTURE) {
_verification = new TunnelVerificationStructure();
_verification.readBytes(in);
int len = (int)DataHelper.readLong(in, 2);
_encryptedInstructions = new byte[len];
read = read(in, _encryptedInstructions);
if (read != len)
throw new I2NPMessageException("Incorrect number of bytes read for instructions (" + read + ", expected " + len + ")");
}
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Unable to load the message data", dfe);
}
}
protected byte[] writeMessage() throws I2NPMessageException, IOException {
if ( (_tunnelId == null) || (_data == null) || (_data.length <= 0) )
throw new I2NPMessageException("Not enough data to write out");
if ( (_tunnelId == null) || (_data == null) || (_data.length <= 0) )
throw new I2NPMessageException("Not enough data to write out");
ByteArrayOutputStream os = new ByteArrayOutputStream(32);
try {
_tunnelId.writeBytes(os);
_log.debug("Writing tunnel message for tunnel " + _tunnelId);
DataHelper.writeLong(os, 4, _data.length);
_log.debug("Writing tunnel message length: " + _data.length);
os.write(_data);
_log.debug("Writing tunnel message data");
if ( (_verification == null) || (_encryptedInstructions == null) ) {
DataHelper.writeLong(os, 1, FLAG_DONT_INCLUDESTRUCTURE);
_log.debug("Writing DontIncludeStructure flag");
} else {
DataHelper.writeLong(os, 1, FLAG_INCLUDESTRUCTURE);
_log.debug("Writing IncludeStructure flag, then the verification structure, then the E(instr).length [" + _encryptedInstructions.length + "], then the E(instr)");
_verification.writeBytes(os);
DataHelper.writeLong(os, 2, _encryptedInstructions.length);
os.write(_encryptedInstructions);
}
_tunnelId.writeBytes(os);
_log.debug("Writing tunnel message for tunnel " + _tunnelId);
DataHelper.writeLong(os, 4, _data.length);
_log.debug("Writing tunnel message length: " + _data.length);
os.write(_data);
_log.debug("Writing tunnel message data");
if ( (_verification == null) || (_encryptedInstructions == null) ) {
DataHelper.writeLong(os, 1, FLAG_DONT_INCLUDESTRUCTURE);
_log.debug("Writing DontIncludeStructure flag");
} else {
DataHelper.writeLong(os, 1, FLAG_INCLUDESTRUCTURE);
_log.debug("Writing IncludeStructure flag, then the verification structure, then the E(instr).length [" + _encryptedInstructions.length + "], then the E(instr)");
_verification.writeBytes(os);
DataHelper.writeLong(os, 2, _encryptedInstructions.length);
os.write(_encryptedInstructions);
}
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error writing out the message data", dfe);
}
byte rv[] = os.toByteArray();
_log.debug("Overall data being written: " + rv.length);
byte rv[] = os.toByteArray();
_log.debug("Overall data being written: " + rv.length);
return rv;
}
public int getType() { return MESSAGE_TYPE; }
public int hashCode() {
return DataHelper.hashCode(getTunnelId()) +
DataHelper.hashCode(_data) +
DataHelper.hashCode(getVerificationStructure()) +
DataHelper.hashCode(getEncryptedDeliveryInstructions());
return DataHelper.hashCode(getTunnelId()) +
DataHelper.hashCode(_data) +
DataHelper.hashCode(getVerificationStructure()) +
DataHelper.hashCode(getEncryptedDeliveryInstructions());
}
public boolean equals(Object object) {
if ( (object != null) && (object instanceof TunnelMessage) ) {
TunnelMessage msg = (TunnelMessage)object;
return DataHelper.eq(getTunnelId(),msg.getTunnelId()) &&
DataHelper.eq(getVerificationStructure(),msg.getVerificationStructure()) &&
DataHelper.eq(getData(),msg.getData()) &&
DataHelper.eq(getEncryptedDeliveryInstructions(), msg.getEncryptedDeliveryInstructions());
DataHelper.eq(getVerificationStructure(),msg.getVerificationStructure()) &&
DataHelper.eq(getData(),msg.getData()) &&
DataHelper.eq(getEncryptedDeliveryInstructions(), msg.getEncryptedDeliveryInstructions());
} else {
return false;
}
}
public String toString() {
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[TunnelMessage: ");
buf.append("\n\tTunnel ID: ").append(getTunnelId());

View File

@ -1,9 +1,9 @@
package net.i2p.data.i2np;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -21,19 +21,19 @@ import net.i2p.data.Signature;
import net.i2p.data.SigningPrivateKey;
import net.i2p.data.SigningPublicKey;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
*
* @author jrandom
*/
public class TunnelVerificationStructure extends DataStructureImpl {
private final static Log _log = new Log(TunnelVerificationStructure.class);
private Hash _msgHash;
private Signature _authSignature;
public TunnelVerificationStructure() {
setMessageHash(null);
setAuthorizationSignature(null);
public TunnelVerificationStructure() {
setMessageHash(null);
setAuthorizationSignature(null);
}
public Hash getMessageHash() { return _msgHash; }
@ -42,45 +42,45 @@ public class TunnelVerificationStructure extends DataStructureImpl {
public Signature getAuthorizationSignature() { return _authSignature; }
public void setAuthorizationSignature(Signature sig) { _authSignature = sig; }
public void sign(SigningPrivateKey key) {
if (_msgHash != null) {
Signature sig = DSAEngine.getInstance().sign(_msgHash.getData(), key);
setAuthorizationSignature(sig);
}
public void sign(RouterContext context, SigningPrivateKey key) {
if (_msgHash != null) {
Signature sig = context.dsa().sign(_msgHash.getData(), key);
setAuthorizationSignature(sig);
}
}
public boolean verifySignature(SigningPublicKey key) {
if (_msgHash == null) return false;
return DSAEngine.getInstance().verifySignature(_authSignature, _msgHash.getData(), key);
public boolean verifySignature(RouterContext context, SigningPublicKey key) {
if (_msgHash == null) return false;
return context.dsa().verifySignature(_authSignature, _msgHash.getData(), key);
}
public void readBytes(InputStream in) throws DataFormatException, IOException {
_msgHash = new Hash();
_msgHash.readBytes(in);
_authSignature = new Signature();
_authSignature.readBytes(in);
_msgHash = new Hash();
_msgHash.readBytes(in);
_authSignature = new Signature();
_authSignature.readBytes(in);
}
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
if (_authSignature == null) {
_authSignature = new Signature();
_authSignature.setData(Signature.FAKE_SIGNATURE);
}
if (_authSignature == null) {
_authSignature = new Signature();
_authSignature.setData(Signature.FAKE_SIGNATURE);
}
if ( (_msgHash == null) || (_authSignature == null) ) throw new DataFormatException("Invalid data");
_msgHash.writeBytes(out);
_authSignature.writeBytes(out);
_msgHash.writeBytes(out);
_authSignature.writeBytes(out);
}
public boolean equals(Object obj) {
if ( (obj == null) || !(obj instanceof TunnelVerificationStructure))
return false;
TunnelVerificationStructure str = (TunnelVerificationStructure)obj;
return DataHelper.eq(getMessageHash(), str.getMessageHash()) &&
DataHelper.eq(getAuthorizationSignature(), str.getAuthorizationSignature());
TunnelVerificationStructure str = (TunnelVerificationStructure)obj;
return DataHelper.eq(getMessageHash(), str.getMessageHash()) &&
DataHelper.eq(getAuthorizationSignature(), str.getAuthorizationSignature());
}
public int hashCode() {
if ( (_msgHash == null) || (_authSignature == null) ) return 0;
return getMessageHash().hashCode() + getAuthorizationSignature().hashCode();
if ( (_msgHash == null) || (_authSignature == null) ) return 0;
return getMessageHash().hashCode() + getAuthorizationSignature().hashCode();
}
public String toString() {

View File

@ -21,8 +21,6 @@ import net.i2p.router.client.ClientManagerFacadeImpl;
* @author jrandom
*/
public abstract class ClientManagerFacade implements Service {
private static ClientManagerFacade _instance = new ClientManagerFacadeImpl();
public static ClientManagerFacade getInstance() { return _instance; }
/**
* Request that a particular client authorize the Leases contained in the
@ -74,16 +72,19 @@ public abstract class ClientManagerFacade implements Service {
}
class DummyClientManagerFacade extends ClientManagerFacade {
private RouterContext _context;
public DummyClientManagerFacade(RouterContext ctx) {
_context = ctx;
}
public boolean isLocal(Hash destHash) { return true; }
public boolean isLocal(Destination dest) { return true; }
public void reportAbuse(Destination dest, String reason, int severity) { }
public void messageReceived(ClientMessage msg) {}
public void requestLeaseSet(Destination dest, LeaseSet set, long timeout, Job onCreateJob, Job onFailedJob) {
JobQueue.getInstance().addJob(onFailedJob);
public void requestLeaseSet(Destination dest, LeaseSet set, long timeout,
Job onCreateJob, Job onFailedJob) {
_context.jobQueue().addJob(onFailedJob);
}
public void startup() {
//JobQueue.getInstance().addJob(new PollOutboundClientMessagesJob());
}
public void startup() {}
public void stopAcceptingClients() { }
public void shutdown() {}

View File

@ -25,15 +25,12 @@ import net.i2p.util.Log;
*
*/
public class ClientMessagePool {
private final static Log _log = new Log(ClientMessagePool.class);
private static ClientMessagePool _instance = new ClientMessagePool();
public static final ClientMessagePool getInstance() { return _instance; }
private List _inMessages;
private List _outMessages;
private Log _log;
private RouterContext _context;
private ClientMessagePool() {
_inMessages = new ArrayList();
_outMessages = new ArrayList();
public ClientMessagePool(RouterContext context) {
_context = context;
_log = _context.logManager().getLog(ClientMessagePool.class);
}
/**
@ -42,84 +39,13 @@ public class ClientMessagePool {
*
*/
public void add(ClientMessage msg) {
if ( (ClientManagerFacade.getInstance().isLocal(msg.getDestination())) ||
(ClientManagerFacade.getInstance().isLocal(msg.getDestinationHash())) ) {
_log.debug("Adding message for local delivery");
ClientManagerFacade.getInstance().messageReceived(msg);
//synchronized (_inMessages) {
// _inMessages.add(msg);
//}
} else {
_log.debug("Adding message for remote delivery");
//JobQueue.getInstance().addJob(new ProcessOutboundClientMessageJob(msg));
JobQueue.getInstance().addJob(new OutboundClientMessageJob(msg));
//synchronized (_outMessages) {
// _outMessages.add(msg);
//}
}
}
/**
* Retrieve the next locally destined message, or null if none are available.
*
*/
public ClientMessage getNextLocal() {
synchronized (_inMessages) {
if (_inMessages.size() <= 0) return null;
return (ClientMessage)_inMessages.remove(0);
}
}
/**
* Retrieve the next remotely destined message, or null if none are available.
*
*/
public ClientMessage getNextRemote() {
synchronized (_outMessages) {
if (_outMessages.size() <= 0) return null;
return (ClientMessage)_outMessages.remove(0);
}
}
/**
* Determine how many locally bound messages are in the pool
*
*/
public int getLocalCount() {
synchronized (_inMessages) {
return _inMessages.size();
}
}
/**
* Determine how many remotely bound messages are in the pool.
*
*/
public int getRemoteCount() {
synchronized (_outMessages) {
return _outMessages.size();
}
}
public void dumpPoolInfo() {
StringBuffer buf = new StringBuffer();
buf.append("\nDumping Client Message Pool. Local messages: ").append(getLocalCount()).append(" Remote messages: ").append(getRemoteCount()).append("\n");
buf.append("Inbound messages\n");
buf.append("----------------------------\n");
synchronized (_inMessages) {
for (Iterator iter = _inMessages.iterator(); iter.hasNext();) {
ClientMessage msg = (ClientMessage)iter.next();
buf.append(msg).append("\n\n");
}
}
buf.append("Outbound messages\n");
buf.append("----------------------------\n");
synchronized (_outMessages) {
for (Iterator iter = _outMessages.iterator(); iter.hasNext();) {
ClientMessage msg = (ClientMessage)iter.next();
buf.append(msg).append("\n\n");
}
}
_log.debug(buf.toString());
if ( (_context.clientManager().isLocal(msg.getDestination())) ||
(_context.clientManager().isLocal(msg.getDestinationHash())) ) {
_log.debug("Adding message for local delivery");
_context.clientManager().messageReceived(msg);
} else {
_log.debug("Adding message for remote delivery");
_context.jobQueue().addJob(new OutboundClientMessageJob(_context, msg));
}
}
}

View File

@ -11,20 +11,12 @@ package net.i2p.router;
import java.util.HashSet;
import java.util.Set;
import net.i2p.router.transport.CommSystemFacadeImpl;
/**
* Manages the communication subsystem between peers, including connections,
* listeners, transports, connection keys, etc.
*
*/
public abstract class CommSystemFacade implements Service {
private static CommSystemFacade _instance = new CommSystemFacadeImpl();
public static CommSystemFacade getInstance() { return _instance; }
// getAddresses
// rotateAddress(address)
public abstract void processMessage(OutNetMessage msg);
public String renderStatusHTML() { return ""; }

View File

@ -1,62 +0,0 @@
package net.i2p.router;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.io.FileOutputStream;
import java.io.IOException;
import net.i2p.util.Log;
public class GenerateStatusConsoleJob extends JobImpl {
private final static Log _log = new Log(GenerateStatusConsoleJob.class);
private final static long REGENERATE_DELAY_MS = 60*1000; // once per minute update the console
public final static String CONFIG_CONSOLE_LOCATION = "routerConsoleFile";
public final static String DEFAULT_CONSOLE_LOCATION = "routerConsole.html";
public final static String PARAM_GENERATE_CONFIG_CONSOLE = "router.generateConsole";
public final static boolean DEFAULT_GENERATE_CONFIG_CONSOLE = true;
private boolean shouldGenerateConsole() {
String str = Router.getInstance().getConfigSetting(PARAM_GENERATE_CONFIG_CONSOLE);
if ( (str == null) || (str.trim().length() <= 0) )
return DEFAULT_GENERATE_CONFIG_CONSOLE;
if (Boolean.TRUE.toString().equalsIgnoreCase(str))
return true;
else
return false;
}
public String getName() { return "Generate Status Console"; }
public void runJob() {
if (shouldGenerateConsole()) {
String consoleHTML = Router.getInstance().renderStatusHTML();
writeConsole(consoleHTML);
}
requeue(REGENERATE_DELAY_MS);
}
private void writeConsole(String html) {
String loc = Router.getInstance().getConfigSetting(CONFIG_CONSOLE_LOCATION);
if (loc == null)
loc = DEFAULT_CONSOLE_LOCATION;
FileOutputStream fos = null;
try {
fos = new FileOutputStream(loc);
fos.write(html.getBytes());
fos.flush();
} catch (IOException ioe) {
_log.error("Error writing out the console", ioe);
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
}

View File

@ -26,17 +26,18 @@ import net.i2p.util.Log;
*
*/
public class InNetMessagePool {
private final static Log _log = new Log(InNetMessagePool.class);
private static InNetMessagePool _instance = new InNetMessagePool();
public final static InNetMessagePool getInstance() { return _instance; }
private Log _log;
private RouterContext _context;
private List _messages;
private Map _handlerJobBuilders;
private InNetMessagePool() {
public InNetMessagePool(RouterContext context) {
_context = context;
_messages = new ArrayList();
_handlerJobBuilders = new HashMap();
StatManager.getInstance().createRateStat("inNetPool.dropped", "How often do we drop a message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("inNetPool.duplicate", "How often do we receive a duplicate message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_log = _context.logManager().getLog(InNetMessagePool.class);
_context.statManager().createRateStat("inNetPool.dropped", "How often do we drop a message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("inNetPool.duplicate", "How often do we receive a duplicate message", "InNetPool", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
}
public HandlerJobBuilder registerHandlerJobBuilder(int i2npMessageType, HandlerJobBuilder builder) {
@ -57,15 +58,15 @@ public class InNetMessagePool {
*/
public int add(InNetMessage msg) {
Date exp = msg.getMessage().getMessageExpiration();
boolean valid = MessageValidator.getInstance().validateMessage(msg.getMessage().getUniqueId(), exp.getTime());
boolean valid = _context.messageValidator().validateMessage(msg.getMessage().getUniqueId(), exp.getTime());
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate message received [" + msg.getMessage().getUniqueId()
+ " expiring on " + exp + "]: " + msg.getMessage().getClass().getName());
StatManager.getInstance().addRateData("inNetPool.dropped", 1, 0);
StatManager.getInstance().addRateData("inNetPool.duplicate", 1, 0);
MessageHistory.getInstance().droppedOtherMessage(msg.getMessage());
MessageHistory.getInstance().messageProcessingError(msg.getMessage().getUniqueId(),
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
_context.statManager().addRateData("inNetPool.duplicate", 1, 0);
_context.messageHistory().droppedOtherMessage(msg.getMessage());
_context.messageHistory().messageProcessingError(msg.getMessage().getUniqueId(),
msg.getMessage().getClass().getName(),
"Duplicate/expired");
return -1;
@ -87,14 +88,14 @@ public class InNetMessagePool {
Job job = builder.createJob(msg.getMessage(), msg.getFromRouter(),
msg.getFromRouterHash(), msg.getReplyBlock());
if (job != null) {
JobQueue.getInstance().addJob(job);
_context.jobQueue().addJob(job);
synchronized (_messages) {
size = _messages.size();
}
}
}
List origMessages = OutboundMessageRegistry.getInstance().getOriginalMessages(msg.getMessage());
List origMessages = _context.messageRegistry().getOriginalMessages(msg.getMessage());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Original messages for inbound message: " + origMessages.size());
if (origMessages.size() > 1) {
@ -112,7 +113,7 @@ public class InNetMessagePool {
if (job != null) {
job.setMessage(msg.getMessage());
JobQueue.getInstance().addJob(job);
_context.jobQueue().addJob(job);
}
}
@ -120,24 +121,24 @@ public class InNetMessagePool {
// not handled as a reply
if (size == -1) {
// was not handled via HandlerJobBuilder
MessageHistory.getInstance().droppedOtherMessage(msg.getMessage());
_context.messageHistory().droppedOtherMessage(msg.getMessage());
if (_log.shouldLog(Log.ERROR))
_log.error("Message " + msg.getMessage() + " was not handled by a HandlerJobBuilder - DROPPING: "
+ msg, new Exception("DROPPED MESSAGE"));
StatManager.getInstance().addRateData("inNetPool.dropped", 1, 0);
_context.statManager().addRateData("inNetPool.dropped", 1, 0);
} else {
String mtype = msg.getMessage().getClass().getName();
MessageHistory.getInstance().receiveMessage(mtype, msg.getMessage().getUniqueId(),
msg.getMessage().getMessageExpiration(),
msg.getFromRouterHash(), true);
_context.messageHistory().receiveMessage(mtype, msg.getMessage().getUniqueId(),
msg.getMessage().getMessageExpiration(),
msg.getFromRouterHash(), true);
return size;
}
}
String mtype = msg.getMessage().getClass().getName();
MessageHistory.getInstance().receiveMessage(mtype, msg.getMessage().getUniqueId(),
msg.getMessage().getMessageExpiration(),
msg.getFromRouterHash(), true);
_context.messageHistory().receiveMessage(mtype, msg.getMessage().getUniqueId(),
msg.getMessage().getMessageExpiration(),
msg.getFromRouterHash(), true);
return size;
}
@ -174,19 +175,4 @@ public class InNetMessagePool {
return _messages.size();
}
}
public void dumpPoolInfo() {
if (!_log.shouldLog(Log.DEBUG)) return;
StringBuffer buf = new StringBuffer();
buf.append("\nDumping Inbound Network Message Pool. Total # message: ").append(getCount()).append("\n");
synchronized (_messages) {
for (Iterator iter = _messages.iterator(); iter.hasNext();) {
InNetMessage msg = (InNetMessage)iter.next();
buf.append("Message ").append(msg.getMessage()).append("\n\n");
}
}
_log.debug(buf.toString());
}
}

View File

@ -13,40 +13,42 @@ import net.i2p.util.Clock;
* Base implementation of a Job
*/
public abstract class JobImpl implements Job {
protected RouterContext _context;
private JobTiming _timing;
private static int _idSrc = 0;
private int _id;
private Exception _addedBy;
private long _madeReadyOn;
public JobImpl() {
_timing = new JobTiming();
_id = ++_idSrc;
_addedBy = null;
_madeReadyOn = 0;
public JobImpl(RouterContext context) {
_context = context;
_timing = new JobTiming(context);
_id = ++_idSrc;
_addedBy = null;
_madeReadyOn = 0;
}
public int getJobId() { return _id; }
public JobTiming getTiming() { return _timing; }
public String toString() {
StringBuffer buf = new StringBuffer(128);
buf.append(super.toString());
buf.append(": Job ").append(_id).append(": ").append(getName());
return buf.toString();
StringBuffer buf = new StringBuffer(128);
buf.append(super.toString());
buf.append(": Job ").append(_id).append(": ").append(getName());
return buf.toString();
}
void addedToQueue() {
_addedBy = new Exception();
_addedBy = new Exception();
}
public Exception getAddedBy() { return _addedBy; }
public long getMadeReadyOn() { return _madeReadyOn; }
public void madeReady() { _madeReadyOn = Clock.getInstance().now(); }
public void madeReady() { _madeReadyOn = _context.clock().now(); }
public void dropped() {}
protected void requeue(long delayMs) {
getTiming().setStartAfter(Clock.getInstance().now() + delayMs);
JobQueue.getInstance().addJob(this);
getTiming().setStartAfter(_context.clock().now() + delayMs);
_context.jobQueue().addJob(this);
}
}

File diff suppressed because it is too large Load Diff

View File

@ -6,103 +6,105 @@ import net.i2p.util.Log;
/** a do run run run a do run run */
class JobQueueRunner implements Runnable {
private final static Log _log = new Log(JobQueueRunner.class);
private Log _log;
private RouterContext _context;
private boolean _keepRunning;
private int _id;
private long _numJobs;
private Job _currentJob;
static {
StatManager.getInstance().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("jobQueue.jobWait", "How long does a job sat on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
public JobQueueRunner(RouterContext context, int id) {
_context = context;
_id = id;
_keepRunning = true;
_numJobs = 0;
_currentJob = null;
_log = _context.logManager().getLog(JobQueueRunner.class);
_context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sat on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
}
public JobQueueRunner(int id) {
_id = id;
_keepRunning = true;
_numJobs = 0;
_currentJob = null;
}
public Job getCurrentJob() { return _currentJob; }
public int getRunnerId() { return _id; }
public void stopRunning() { _keepRunning = false; }
public void run() {
long lastActive = Clock.getInstance().now();;
while ( (_keepRunning) && (JobQueue.getInstance().isAlive()) ) {
try {
Job job = JobQueue.getInstance().getNext();
if (job == null) continue;
long now = Clock.getInstance().now();
long enqueuedTime = 0;
if (job instanceof JobImpl) {
long when = ((JobImpl)job).getMadeReadyOn();
if (when <= 0) {
_log.error("Job was not made ready?! " + job, new Exception("Not made ready?!"));
} else {
enqueuedTime = now - when;
}
}
long betweenJobs = now - lastActive;
StatManager.getInstance().addRateData("jobQueue.jobRunnerInactive", betweenJobs, betweenJobs);
_currentJob = job;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Runner " + _id + " running job " + job.getJobId() + ": " + job.getName());
long origStartAfter = job.getTiming().getStartAfter();
long doStart = Clock.getInstance().now();
job.getTiming().start();
runCurrentJob();
job.getTiming().end();
long duration = job.getTiming().getActualEnd() - job.getTiming().getActualStart();
long lastActive = _context.clock().now();
while ( (_keepRunning) && (_context.jobQueue().isAlive()) ) {
try {
Job job = _context.jobQueue().getNext();
if (job == null) continue;
long now = _context.clock().now();
long beforeUpdate = Clock.getInstance().now();
JobQueue.getInstance().updateStats(job, doStart, origStartAfter, duration);
long diff = Clock.getInstance().now() - beforeUpdate;
StatManager.getInstance().addRateData("jobQueue.jobRun", duration, duration);
StatManager.getInstance().addRateData("jobQueue.jobLag", doStart - origStartAfter, 0);
StatManager.getInstance().addRateData("jobQueue.jobWait", enqueuedTime, enqueuedTime);
if (diff > 100) {
if (_log.shouldLog(Log.WARN))
_log.warn("Updating statistics for the job took too long [" + diff + "ms]");
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Job duration " + duration + "ms for " + job.getName() + " with lag of " + (doStart-origStartAfter) + "ms");
lastActive = Clock.getInstance().now();
_currentJob = null;
} catch (Throwable t) {
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "WTF, error running?", t);
}
}
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Queue runner " + _id + " exiting");
JobQueue.getInstance().removeRunner(_id);
long enqueuedTime = 0;
if (job instanceof JobImpl) {
long when = ((JobImpl)job).getMadeReadyOn();
if (when <= 0) {
_log.error("Job was not made ready?! " + job,
new Exception("Not made ready?!"));
} else {
enqueuedTime = now - when;
}
}
long betweenJobs = now - lastActive;
_context.statManager().addRateData("jobQueue.jobRunnerInactive", betweenJobs, betweenJobs);
_currentJob = job;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Runner " + _id + " running job " + job.getJobId() + ": " + job.getName());
long origStartAfter = job.getTiming().getStartAfter();
long doStart = _context.clock().now();
job.getTiming().start();
runCurrentJob();
job.getTiming().end();
long duration = job.getTiming().getActualEnd() - job.getTiming().getActualStart();
long beforeUpdate = _context.clock().now();
_context.jobQueue().updateStats(job, doStart, origStartAfter, duration);
long diff = _context.clock().now() - beforeUpdate;
_context.statManager().addRateData("jobQueue.jobRun", duration, duration);
_context.statManager().addRateData("jobQueue.jobLag", doStart - origStartAfter, 0);
_context.statManager().addRateData("jobQueue.jobWait", enqueuedTime, enqueuedTime);
if (diff > 100) {
if (_log.shouldLog(Log.WARN))
_log.warn("Updating statistics for the job took too long [" + diff + "ms]");
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Job duration " + duration + "ms for " + job.getName()
+ " with lag of " + (doStart-origStartAfter) + "ms");
lastActive = _context.clock().now();
_currentJob = null;
} catch (Throwable t) {
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "WTF, error running?", t);
}
}
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Queue runner " + _id + " exiting");
_context.jobQueue().removeRunner(_id);
}
private void runCurrentJob() {
try {
_currentJob.runJob();
} catch (OutOfMemoryError oom) {
try {
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Router ran out of memory, shutting down", oom);
Router.getInstance().shutdown();
} catch (Throwable t) {
System.err.println("***Router ran out of memory, shutting down hard");
}
try { Thread.sleep(1000); } catch (InterruptedException ie) {}
System.exit(-1);
} catch (Throwable t) {
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Error processing job [" + _currentJob.getName() + "] on thread " + _id + ": " + t.getMessage(), t);
if (_log.shouldLog(Log.ERROR))
_log.error("The above job was enqueued by: ", _currentJob.getAddedBy());
JobQueue.getInstance().dumpRunners(true);
}
try {
_currentJob.runJob();
} catch (OutOfMemoryError oom) {
try {
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Router ran out of memory, shutting down", oom);
_context.router().shutdown();
} catch (Throwable t) {
System.err.println("***Router ran out of memory, shutting down hard");
}
try { Thread.sleep(1000); } catch (InterruptedException ie) {}
System.exit(-1);
} catch (Throwable t) {
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Error processing job [" + _currentJob.getName()
+ "] on thread " + _id + ": " + t.getMessage(), t);
if (_log.shouldLog(Log.ERROR))
_log.error("The above job was enqueued by: ", _currentJob.getAddedBy());
}
}
}

View File

@ -1,14 +1,15 @@
package net.i2p.router;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import net.i2p.util.Clock;
/**
* Define the timing requirements and statistics for a particular job
*
@ -17,12 +18,14 @@ public class JobTiming implements Clock.ClockUpdateListener {
private long _start;
private long _actualStart;
private long _actualEnd;
private RouterContext _context;
public JobTiming() {
_start = Clock.getInstance().now();
_actualStart = 0;
_actualEnd = 0;
Clock.getInstance().addUpdateListener(this);
public JobTiming(RouterContext context) {
_context = context;
_start = context.clock().now();
_actualStart = 0;
_actualEnd = 0;
context.clock().addUpdateListener(this);
}
/**
@ -42,7 +45,7 @@ public class JobTiming implements Clock.ClockUpdateListener {
* Notify the timing that the job began
*
*/
public void start() { _actualStart = Clock.getInstance().now(); }
public void start() { _actualStart = _context.clock().now(); }
/**
* # of milliseconds after the epoch the job actually ended
*
@ -53,17 +56,17 @@ public class JobTiming implements Clock.ClockUpdateListener {
* Notify the timing that the job finished
*
*/
public void end() {
_actualEnd = Clock.getInstance().now();
Clock.getInstance().removeUpdateListener(this);
public void end() {
_actualEnd = _context.clock().now();
_context.clock().removeUpdateListener(this);
}
public void offsetChanged(long delta) {
if (_start != 0)
_start += delta;
if (_actualStart != 0)
_actualStart += delta;
if (_actualEnd != 0)
_actualEnd += delta;
if (_start != 0)
_start += delta;
if (_actualStart != 0)
_actualStart += delta;
if (_actualEnd != 0)
_actualEnd += delta;
}
}

View File

@ -32,9 +32,8 @@ import net.i2p.util.Log;
*
*/
public class KeyManager {
private final static Log _log = new Log(KeyManager.class);
private static KeyManager _instance = new KeyManager();
public static KeyManager getInstance() { return _instance; }
private Log _log;
private RouterContext _context;
private PrivateKey _privateKey;
private PublicKey _publicKey;
private SigningPrivateKey _signingPrivateKey;
@ -49,13 +48,15 @@ public class KeyManager {
private final static String KEYFILE_PUBLIC_SIGNING = "publicSigning.key";
private final static long DELAY = 30*1000;
private KeyManager() {
setPrivateKey(null);
setPublicKey(null);
setSigningPrivateKey(null);
setSigningPublicKey(null);
_leaseSetKeys = new HashMap();
JobQueue.getInstance().addJob(new SynchronizeKeysJob());
public KeyManager(RouterContext context) {
_context = context;
_log = _context.logManager().getLog(KeyManager.class);
setPrivateKey(null);
setPublicKey(null);
setSigningPrivateKey(null);
setSigningPublicKey(null);
_leaseSetKeys = new HashMap();
_context.jobQueue().addJob(new SynchronizeKeysJob());
}
/** Configure the router's private key */
@ -72,119 +73,122 @@ public class KeyManager {
public SigningPublicKey getSigningPublicKey() { return _signingPublicKey; }
public void registerKeys(Destination dest, SigningPrivateKey leaseRevocationPrivateKey, PrivateKey endpointDecryptionKey) {
_log.info("Registering keys for destination " + dest.calculateHash().toBase64());
LeaseSetKeys keys = new LeaseSetKeys(dest, leaseRevocationPrivateKey, endpointDecryptionKey);
synchronized (_leaseSetKeys) {
_leaseSetKeys.put(dest, keys);
}
_log.info("Registering keys for destination " + dest.calculateHash().toBase64());
LeaseSetKeys keys = new LeaseSetKeys(dest, leaseRevocationPrivateKey, endpointDecryptionKey);
synchronized (_leaseSetKeys) {
_leaseSetKeys.put(dest, keys);
}
}
public LeaseSetKeys unregisterKeys(Destination dest) {
_log.info("Unregistering keys for destination " + dest.calculateHash().toBase64());
synchronized (_leaseSetKeys) {
return (LeaseSetKeys)_leaseSetKeys.remove(dest);
}
_log.info("Unregistering keys for destination " + dest.calculateHash().toBase64());
synchronized (_leaseSetKeys) {
return (LeaseSetKeys)_leaseSetKeys.remove(dest);
}
}
public LeaseSetKeys getKeys(Destination dest) {
synchronized (_leaseSetKeys) {
return (LeaseSetKeys)_leaseSetKeys.get(dest);
}
synchronized (_leaseSetKeys) {
return (LeaseSetKeys)_leaseSetKeys.get(dest);
}
}
public Set getAllKeys() {
HashSet keys = new HashSet();
synchronized (_leaseSetKeys) {
keys.addAll(_leaseSetKeys.values());
}
return keys;
HashSet keys = new HashSet();
synchronized (_leaseSetKeys) {
keys.addAll(_leaseSetKeys.values());
}
return keys;
}
private class SynchronizeKeysJob extends JobImpl {
public void runJob() {
String keyDir = Router.getInstance().getConfigSetting(PROP_KEYDIR);
if (keyDir == null)
keyDir = DEFAULT_KEYDIR;
File dir = new File(keyDir);
if (!dir.exists())
dir.mkdirs();
if (dir.exists() && dir.isDirectory() && dir.canRead() && dir.canWrite())
syncKeys(dir);
getTiming().setStartAfter(Clock.getInstance().now()+DELAY);
JobQueue.getInstance().addJob(this);
}
private void syncKeys(File keyDir) {
syncPrivateKey(keyDir);
syncPublicKey(keyDir);
syncSigningKey(keyDir);
syncVerificationKey(keyDir);
}
private void syncPrivateKey(File keyDir) {
File keyFile = new File(keyDir, KeyManager.KEYFILE_PRIVATE_ENC);
boolean exists = (_privateKey != null);
if (!exists)
_privateKey = new PrivateKey();
_privateKey = (PrivateKey)syncKey(keyFile, _privateKey, exists);
}
private void syncPublicKey(File keyDir) {
File keyFile = new File(keyDir, KeyManager.KEYFILE_PUBLIC_ENC);
boolean exists = (_publicKey != null);
if (!exists)
_publicKey = new PublicKey();
_publicKey = (PublicKey)syncKey(keyFile, _publicKey, exists);
}
private void syncSigningKey(File keyDir) {
File keyFile = new File(keyDir, KeyManager.KEYFILE_PRIVATE_SIGNING);
boolean exists = (_signingPrivateKey != null);
if (!exists)
_signingPrivateKey = new SigningPrivateKey();
_signingPrivateKey = (SigningPrivateKey)syncKey(keyFile, _signingPrivateKey, exists);
}
private void syncVerificationKey(File keyDir) {
File keyFile = new File(keyDir, KeyManager.KEYFILE_PUBLIC_SIGNING);
boolean exists = (_signingPublicKey != null);
if (!exists)
_signingPublicKey = new SigningPublicKey();
_signingPublicKey = (SigningPublicKey)syncKey(keyFile, _signingPublicKey, exists);
}
private DataStructure syncKey(File keyFile, DataStructure structure, boolean exists) {
FileOutputStream out = null;
FileInputStream in = null;
try {
if (exists) {
out = new FileOutputStream(keyFile);
structure.writeBytes(out);
return structure;
} else {
if (keyFile.exists()) {
in = new FileInputStream(keyFile);
structure.readBytes(in);
return structure;
} else {
// we don't have it, and its not on disk. oh well.
return null;
}
}
} catch (IOException ioe) {
_log.error("Error syncing the structure to " + keyFile.getAbsolutePath(), ioe);
} catch (DataFormatException dfe) {
_log.error("Error syncing the structure with " + keyFile.getAbsolutePath(), dfe);
} finally {
if (out != null) try { out.close(); } catch (IOException ioe) {}
if (in != null) try { in.close(); } catch (IOException ioe) {}
}
if (exists)
return structure;
else
return null;
}
public String getName() { return "Synchronize Keys to Disk"; }
public SynchronizeKeysJob() {
super(KeyManager.this._context);
}
public void runJob() {
String keyDir = KeyManager.this._context.router().getConfigSetting(PROP_KEYDIR);
if (keyDir == null)
keyDir = DEFAULT_KEYDIR;
File dir = new File(keyDir);
if (!dir.exists())
dir.mkdirs();
if (dir.exists() && dir.isDirectory() && dir.canRead() && dir.canWrite())
syncKeys(dir);
getTiming().setStartAfter(KeyManager.this._context.clock().now()+DELAY);
KeyManager.this._context.jobQueue().addJob(this);
}
private void syncKeys(File keyDir) {
syncPrivateKey(keyDir);
syncPublicKey(keyDir);
syncSigningKey(keyDir);
syncVerificationKey(keyDir);
}
private void syncPrivateKey(File keyDir) {
File keyFile = new File(keyDir, KeyManager.KEYFILE_PRIVATE_ENC);
boolean exists = (_privateKey != null);
if (!exists)
_privateKey = new PrivateKey();
_privateKey = (PrivateKey)syncKey(keyFile, _privateKey, exists);
}
private void syncPublicKey(File keyDir) {
File keyFile = new File(keyDir, KeyManager.KEYFILE_PUBLIC_ENC);
boolean exists = (_publicKey != null);
if (!exists)
_publicKey = new PublicKey();
_publicKey = (PublicKey)syncKey(keyFile, _publicKey, exists);
}
/**
 * Sync the signing private key: write it out if it is already in
 * memory, otherwise try to load it from disk.
 */
private void syncSigningKey(File keyDir) {
    File keyFile = new File(keyDir, KeyManager.KEYFILE_PRIVATE_SIGNING);
    // exists == key already loaded in memory
    boolean exists = (_signingPrivateKey != null);
    if (!exists)
        _signingPrivateKey = new SigningPrivateKey();
    // may end up null if the key is neither in memory nor on disk
    _signingPrivateKey = (SigningPrivateKey)syncKey(keyFile, _signingPrivateKey, exists);
}
/**
 * Sync the signing public (verification) key: write it out if it is
 * already in memory, otherwise try to load it from disk.
 */
private void syncVerificationKey(File keyDir) {
    File keyFile = new File(keyDir, KeyManager.KEYFILE_PUBLIC_SIGNING);
    // exists == key already loaded in memory
    boolean exists = (_signingPublicKey != null);
    if (!exists)
        _signingPublicKey = new SigningPublicKey();
    // may end up null if the key is neither in memory nor on disk
    _signingPublicKey = (SigningPublicKey)syncKey(keyFile, _signingPublicKey, exists);
}
/**
 * Synchronize one key structure between memory and disk.
 *
 * If {@code exists} is true the in-memory structure is written out to
 * {@code keyFile}; otherwise the structure is filled in by reading the
 * file from disk, if the file is present.
 *
 * @param keyFile   file backing this key on disk
 * @param structure in-memory key structure to persist or populate
 * @param exists    true == key is already in memory (so write it out),
 *                  false == attempt to load it from disk
 * @return the synchronized structure, or null when no key was in memory
 *         and none could be read from disk
 */
private DataStructure syncKey(File keyFile, DataStructure structure, boolean exists) {
    FileOutputStream out = null;
    FileInputStream in = null;
    try {
        if (exists) {
            // key is in memory - persist it to disk
            out = new FileOutputStream(keyFile);
            structure.writeBytes(out);
            return structure;
        } else {
            if (keyFile.exists()) {
                // not in memory, but on disk - load it
                in = new FileInputStream(keyFile);
                structure.readBytes(in);
                return structure;
            } else {
                // we don't have it, and its not on disk.  oh well.
                return null;
            }
        }
    } catch (IOException ioe) {
        _log.error("Error syncing the structure to " + keyFile.getAbsolutePath(), ioe);
    } catch (DataFormatException dfe) {
        _log.error("Error syncing the structure with " + keyFile.getAbsolutePath(), dfe);
    } finally {
        // close quietly - nothing useful can be done if close fails
        if (out != null) try { out.close(); } catch (IOException ioe) {}
        if (in != null) try { in.close(); } catch (IOException ioe) {}
    }
    // reached only after an IO/format error: keep the in-memory copy if we had one
    if (exists)
        return structure;
    else
        return null;
}
public String getName() { return "Synchronize Keys to Disk"; }
}
}

View File

@ -23,13 +23,16 @@ import net.i2p.util.Log;
*
*/
public class MessageHistory {
private final static Log _log = new Log(MessageHistory.class);
private static MessageHistory _instance;
private Log _log;
private RouterContext _context;
private List _unwrittenEntries; // list of raw entries (strings) yet to be written
private String _historyFile; // where to write
private String _localIdent; // placed in each entry to uniquely identify the local router
private boolean _doLog; // true == we want to log
private boolean _doPause; // true == briefly stop writing data to the log (used while submitting it)
private ReinitializeJob _reinitializeJob;
private WriteJob _writeJob;
private SubmitMessageHistoryJob _submitMessageHistoryJob;
private final static byte[] NL = System.getProperty("line.separator").getBytes();
private final static int FLUSH_SIZE = 1000; // write out at least once every 1000 entries
@ -41,21 +44,12 @@ public class MessageHistory {
public final static String PROP_MESSAGE_HISTORY_FILENAME = "router.historyFilename";
public final static String DEFAULT_MESSAGE_HISTORY_FILENAME = "messageHistory.txt";
public final static MessageHistory getInstance() {
if (_instance == null)
initialize();
return _instance;
}
private final static void setInstance(MessageHistory hist) {
if (_instance != null) {
synchronized (_instance._unwrittenEntries) {
for (Iterator iter = _instance._unwrittenEntries.iterator(); iter.hasNext(); ) {
hist.addEntry((String)iter.next());
}
_instance._unwrittenEntries.clear();
}
}
_instance = hist;
/**
 * Create a component to monitor the message history of the router,
 * scoped to the given context (replaces the old singleton wiring).
 *
 * @param context router context supplying clock, job queue, and config
 */
public MessageHistory(RouterContext context) {
    _context = context;
    _reinitializeJob = new ReinitializeJob();
    _writeJob = new WriteJob();
    _submitMessageHistoryJob = new SubmitMessageHistoryJob(_context);
    // force == true: build the log state / schedule jobs right away
    initialize(true);
}
void setDoLog(boolean log) { _doLog = log; }
@ -65,19 +59,19 @@ public class MessageHistory {
String getFilename() { return _historyFile; }
private void updateSettings() {
String keepHistory = Router.getInstance().getConfigSetting(PROP_KEEP_MESSAGE_HISTORY);
if (keepHistory != null) {
_doLog = Boolean.TRUE.toString().equalsIgnoreCase(keepHistory);
} else {
_doLog = DEFAULT_KEEP_MESSAGE_HISTORY;
}
String filename = null;
if (_doLog) {
filename = Router.getInstance().getConfigSetting(PROP_MESSAGE_HISTORY_FILENAME);
if ( (filename == null) || (filename.trim().length() <= 0) )
filename = DEFAULT_MESSAGE_HISTORY_FILENAME;
}
String keepHistory = _context.router().getConfigSetting(PROP_KEEP_MESSAGE_HISTORY);
if (keepHistory != null) {
_doLog = Boolean.TRUE.toString().equalsIgnoreCase(keepHistory);
} else {
_doLog = DEFAULT_KEEP_MESSAGE_HISTORY;
}
String filename = null;
if (_doLog) {
filename = _context.router().getConfigSetting(PROP_MESSAGE_HISTORY_FILENAME);
if ( (filename == null) || (filename.trim().length() <= 0) )
filename = DEFAULT_MESSAGE_HISTORY_FILENAME;
}
}
/**
@ -85,55 +79,38 @@ public class MessageHistory {
* Call this whenever the router identity changes.
*
*/
public static void initialize() {
initialize(false);
/**
 * Initialize (or reinitialize) the history log from the router's config.
 * Call this whenever the router identity changes.
 *
 * @param forceReinitialize if false this call is a no-op
 */
public void initialize(boolean forceReinitialize) {
    if (!forceReinitialize) return;

    if (_context.router().getRouterInfo() == null) {
        // router identity isn't ready yet - retry via the job queue in 5 seconds
        _reinitializeJob.getTiming().setStartAfter(_context.clock().now()+5000);
        _context.jobQueue().addJob(_reinitializeJob);
    } else {
        String filename = null;
        filename = _context.router().getConfigSetting(PROP_MESSAGE_HISTORY_FILENAME);
        if ( (filename == null) || (filename.trim().length() <= 0) )
            filename = DEFAULT_MESSAGE_HISTORY_FILENAME;

        _doLog = DEFAULT_KEEP_MESSAGE_HISTORY;
        _historyFile = filename;
        // short name of the local router, prefixed to every log entry
        _localIdent = getName(_context.routerHash());
        _unwrittenEntries = new LinkedList();
        updateSettings();
        addEntry(getPrefix() + "** Router initialized (started up or changed identities)");
        _context.jobQueue().addJob(_writeJob);
        // submit the collected history a couple of minutes after startup
        _submitMessageHistoryJob.getTiming().setStartAfter(_context.clock().now() + 2*60*1000);
        _context.jobQueue().addJob(_submitMessageHistoryJob);
    }
}
public static void initialize(boolean forceReinitialize) {
if ( (!forceReinitialize) && (_instance != null) ) return;
if (Router.getInstance().getRouterInfo() == null) {
ReinitializeJob j = ReinitializeJob.getInstance();
j.getTiming().setStartAfter(Clock.getInstance().now()+5000);
JobQueue.getInstance().addJob(j);
} else {
String filename = null;
filename = Router.getInstance().getConfigSetting(PROP_MESSAGE_HISTORY_FILENAME);
if ( (filename == null) || (filename.trim().length() <= 0) )
filename = DEFAULT_MESSAGE_HISTORY_FILENAME;
MessageHistory hist = new MessageHistory(Router.getInstance().getRouterInfo().getIdentity().getHash(), filename);
setInstance(hist);
hist.updateSettings();
getInstance().addEntry(getInstance().getPrefix() + "** Router initialized (started up or changed identities)");
JobQueue.getInstance().addJob(new WriteJob());
SubmitMessageHistoryJob histJob = new SubmitMessageHistoryJob();
histJob.getTiming().setStartAfter(Clock.getInstance().now() + 2*60*1000);
JobQueue.getInstance().addJob(histJob);
}
}
/** Singleton job retrying the static initialize() later (pre-RouterContext style). */
private static final class ReinitializeJob extends JobImpl {
    private final static ReinitializeJob _jobInstance = new ReinitializeJob();
    public final static ReinitializeJob getInstance() { return _jobInstance; }
    private ReinitializeJob() {
        super();
    }
    public void runJob() {
        MessageHistory.initialize();
    }
    public String getName() { return "Reinitialize message history"; }
}
/**
* Create a component to monitor the message history of the router.
*
* @param localIdent Hash of local identity
* @param filename file to log trace info to
*/
private MessageHistory(Hash localIdent, String filename) {
_doLog = DEFAULT_KEEP_MESSAGE_HISTORY;
_historyFile = filename;
_localIdent = getName(localIdent);
_unwrittenEntries = new LinkedList();
private final class ReinitializeJob extends JobImpl {
private ReinitializeJob() {
super(MessageHistory.this._context);
}
public void runJob() {
initialize(true);
}
public String getName() { return "Reinitialize message history"; }
}
/**
@ -151,20 +128,20 @@ public class MessageHistory {
* @param replyThrough the gateway of the tunnel that the sourceRoutePeer will be sending to
*/
public void requestTunnelCreate(TunnelId createTunnel, TunnelId outTunnel, Hash peerRequested, Hash nextPeer, Hash sourceRoutePeer, TunnelId replyTunnel, Hash replyThrough) {
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("request [").append(getName(peerRequested)).append("] to create tunnel [");
buf.append(createTunnel.getTunnelId()).append("] ");
if (nextPeer != null)
buf.append("(next [").append(getName(nextPeer)).append("]) ");
if (outTunnel != null)
buf.append("via [").append(outTunnel.getTunnelId()).append("] ");
if (sourceRoutePeer != null)
buf.append("with replies routed through [").append(getName(sourceRoutePeer)).append("] ");
if ( (replyTunnel != null) && (replyThrough != null) )
buf.append("who forwards it through [").append(replyTunnel.getTunnelId()).append("] on [").append(getName(replyThrough)).append("]");
addEntry(buf.toString());
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("request [").append(getName(peerRequested)).append("] to create tunnel [");
buf.append(createTunnel.getTunnelId()).append("] ");
if (nextPeer != null)
buf.append("(next [").append(getName(nextPeer)).append("]) ");
if (outTunnel != null)
buf.append("via [").append(outTunnel.getTunnelId()).append("] ");
if (sourceRoutePeer != null)
buf.append("with replies routed through [").append(getName(sourceRoutePeer)).append("] ");
if ( (replyTunnel != null) && (replyThrough != null) )
buf.append("who forwards it through [").append(replyTunnel.getTunnelId()).append("] on [").append(getName(replyThrough)).append("]");
addEntry(buf.toString());
}
/**
@ -178,14 +155,14 @@ public class MessageHistory {
* @param sourceRoutePeer peer through whom we should send our garlic routed ok through
*/
public void receiveTunnelCreate(TunnelId createTunnel, Hash nextPeer, Date expire, boolean ok, Hash sourceRoutePeer) {
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("receive tunnel create [").append(createTunnel.getTunnelId()).append("] ");
if (nextPeer != null)
buf.append("(next [").append(getName(nextPeer)).append("]) ");
buf.append("ok? ").append(ok).append(" expiring on [").append(getTime(expire)).append("]");
addEntry(buf.toString());
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("receive tunnel create [").append(createTunnel.getTunnelId()).append("] ");
if (nextPeer != null)
buf.append("(next [").append(getName(nextPeer)).append("]) ");
buf.append("ok? ").append(ok).append(" expiring on [").append(getTime(expire)).append("]");
addEntry(buf.toString());
}
/**
@ -195,22 +172,22 @@ public class MessageHistory {
* @param tunnel tunnel joined
*/
public void tunnelJoined(String state, TunnelInfo tunnel) {
if (!_doLog) return;
if (tunnel == null) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("joining tunnel [").append(tunnel.getTunnelId().getTunnelId()).append("] as [").append(state).append("] ");
buf.append(" (next: ");
TunnelInfo cur = tunnel;
while (cur.getNextHopInfo() != null) {
buf.append('[').append(getName(cur.getNextHopInfo().getThisHop()));
buf.append("], ");
cur = cur.getNextHopInfo();
}
if (cur.getNextHop() != null)
buf.append('[').append(getName(cur.getNextHop())).append(']');
buf.append(") expiring on [").append(getTime(new Date(tunnel.getSettings().getExpiration()))).append("]");
addEntry(buf.toString());
if (!_doLog) return;
if (tunnel == null) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("joining tunnel [").append(tunnel.getTunnelId().getTunnelId()).append("] as [").append(state).append("] ");
buf.append(" (next: ");
TunnelInfo cur = tunnel;
while (cur.getNextHopInfo() != null) {
buf.append('[').append(getName(cur.getNextHopInfo().getThisHop()));
buf.append("], ");
cur = cur.getNextHopInfo();
}
if (cur.getNextHop() != null)
buf.append('[').append(getName(cur.getNextHop())).append(']');
buf.append(") expiring on [").append(getTime(new Date(tunnel.getSettings().getExpiration()))).append("]");
addEntry(buf.toString());
}
/**
@ -219,12 +196,12 @@ public class MessageHistory {
* @param tunnel tunnel failed
*/
public void tunnelFailed(TunnelId tunnel) {
if (!_doLog) return;
if (tunnel == null) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("failing tunnel [").append(tunnel.getTunnelId()).append("]");
addEntry(buf.toString());
if (!_doLog) return;
if (tunnel == null) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("failing tunnel [").append(tunnel.getTunnelId()).append("]");
addEntry(buf.toString());
}
/**
@ -235,24 +212,24 @@ public class MessageHistory {
* @param timeToTest milliseconds to verify the tunnel
*/
public void tunnelValid(TunnelInfo tunnel, long timeToTest) {
if (!_doLog) return;
if (tunnel == null) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("tunnel ").append(tunnel.getTunnelId().getTunnelId()).append(" tested ok after ").append(timeToTest).append("ms (containing ");
TunnelInfo cur = tunnel;
while (cur != null) {
buf.append('[').append(getName(cur.getThisHop())).append("], ");
if (cur.getNextHopInfo() != null) {
cur = cur.getNextHopInfo();
} else {
if (cur.getNextHop() != null)
buf.append('[').append(getName(cur.getNextHop())).append(']');
cur = null;
}
}
buf.append(')');
addEntry(buf.toString());
if (!_doLog) return;
if (tunnel == null) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("tunnel ").append(tunnel.getTunnelId().getTunnelId()).append(" tested ok after ").append(timeToTest).append("ms (containing ");
TunnelInfo cur = tunnel;
while (cur != null) {
buf.append('[').append(getName(cur.getThisHop())).append("], ");
if (cur.getNextHopInfo() != null) {
cur = cur.getNextHopInfo();
} else {
if (cur.getNextHop() != null)
buf.append('[').append(getName(cur.getNextHop())).append(']');
cur = null;
}
}
buf.append(')');
addEntry(buf.toString());
}
/**
@ -260,15 +237,15 @@ public class MessageHistory {
*
*/
public void tunnelRejected(Hash peer, TunnelId tunnel, Hash replyThrough, String reason) {
if (!_doLog) return;
if ( (tunnel == null) || (peer == null) ) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("tunnel [").append(tunnel.getTunnelId()).append("] was rejected by [");
buf.append(getName(peer)).append("] for [").append(reason).append("]");
if (replyThrough != null)
buf.append(" with their reply intended to come through [").append(getName(replyThrough)).append("]");
addEntry(buf.toString());
if (!_doLog) return;
if ( (tunnel == null) || (peer == null) ) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("tunnel [").append(tunnel.getTunnelId()).append("] was rejected by [");
buf.append(getName(peer)).append("] for [").append(reason).append("]");
if (replyThrough != null)
buf.append(" with their reply intended to come through [").append(getName(replyThrough)).append("]");
addEntry(buf.toString());
}
/**
@ -277,15 +254,15 @@ public class MessageHistory {
*
*/
public void tunnelRequestTimedOut(Hash peer, TunnelId tunnel, Hash replyThrough) {
if (!_doLog) return;
if ( (tunnel == null) || (peer == null) ) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("tunnel [").append(tunnel.getTunnelId()).append("] timed out on [");
buf.append(getName(peer)).append("]");
if (replyThrough != null)
buf.append(" with their reply intended to come through [").append(getName(replyThrough)).append("]");
addEntry(buf.toString());
if (!_doLog) return;
if ( (tunnel == null) || (peer == null) ) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("tunnel [").append(tunnel.getTunnelId()).append("] timed out on [");
buf.append(getName(peer)).append("]");
if (replyThrough != null)
buf.append(" with their reply intended to come through [").append(getName(replyThrough)).append("]");
addEntry(buf.toString());
}
/**
@ -296,24 +273,24 @@ public class MessageHistory {
* @param from peer that sent us this message (if known)
*/
public void droppedTunnelMessage(TunnelId id, Hash from) {
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("dropped message for unknown tunnel [").append(id.getTunnelId()).append("] from [").append(getName(from)).append("]");
addEntry(buf.toString());
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("dropped message for unknown tunnel [").append(id.getTunnelId()).append("] from [").append(getName(from)).append("]");
addEntry(buf.toString());
}
/**
* We received another message we weren't waiting for and don't know how to handle
*/
public void droppedOtherMessage(I2NPMessage message) {
if (!_doLog) return;
if (message == null) return;
StringBuffer buf = new StringBuffer(512);
buf.append(getPrefix());
buf.append("dropped [").append(message.getClass().getName()).append("] ").append(message.getUniqueId());
buf.append(" [").append(message.toString()).append("]");
addEntry(buf.toString());
if (!_doLog) return;
if (message == null) return;
StringBuffer buf = new StringBuffer(512);
buf.append(getPrefix());
buf.append("dropped [").append(message.getClass().getName()).append("] ").append(message.getUniqueId());
buf.append(" [").append(message.toString()).append("]");
addEntry(buf.toString());
}
/**
@ -322,16 +299,16 @@ public class MessageHistory {
* @param sentMessage message sent that didn't receive a reply
*/
public void replyTimedOut(OutNetMessage sentMessage) {
if (!_doLog) return;
if (sentMessage == null) return;
StringBuffer buf = new StringBuffer(512);
buf.append(getPrefix());
buf.append("timed out waiting for a reply to [").append(sentMessage.getMessage().getClass().getName());
buf.append("] [").append(sentMessage.getMessage().getUniqueId()).append("] expiring on [");
if (sentMessage != null)
buf.append(getTime(new Date(sentMessage.getReplySelector().getExpiration())));
buf.append("] ").append(sentMessage.getReplySelector().toString());
addEntry(buf.toString());
if (!_doLog) return;
if (sentMessage == null) return;
StringBuffer buf = new StringBuffer(512);
buf.append(getPrefix());
buf.append("timed out waiting for a reply to [").append(sentMessage.getMessage().getClass().getName());
buf.append("] [").append(sentMessage.getMessage().getUniqueId()).append("] expiring on [");
if (sentMessage != null)
buf.append(getTime(new Date(sentMessage.getReplySelector().getExpiration())));
buf.append("] ").append(sentMessage.getReplySelector().toString());
addEntry(buf.toString());
}
/**
@ -342,11 +319,11 @@ public class MessageHistory {
* @param error error message related to the processing of the message
*/
public void messageProcessingError(long messageId, String messageType, String error) {
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("Error processing [").append(messageType).append("] [").append(messageId).append("] failed with [").append(error).append("]");
addEntry(buf.toString());
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("Error processing [").append(messageType).append("] [").append(messageId).append("] failed with [").append(error).append("]");
addEntry(buf.toString());
}
/**
@ -360,17 +337,17 @@ public class MessageHistory {
* @param sentOk whether the message was sent successfully
*/
public void sendMessage(String messageType, long messageId, Date expiration, Hash peer, boolean sentOk) {
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("send [").append(messageType).append("] message [").append(messageId).append("] ");
buf.append("to [").append(getName(peer)).append("] ");
buf.append("expiring on [").append(getTime(expiration)).append("] ");
if (sentOk)
buf.append("successfully");
else
buf.append("failed");
addEntry(buf.toString());
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("send [").append(messageType).append("] message [").append(messageId).append("] ");
buf.append("to [").append(getName(peer)).append("] ");
buf.append("expiring on [").append(getTime(expiration)).append("] ");
if (sentOk)
buf.append("successfully");
else
buf.append("failed");
addEntry(buf.toString());
}
/**
@ -385,20 +362,20 @@ public class MessageHistory {
*
*/
public void receiveMessage(String messageType, long messageId, Date expiration, Hash from, boolean isValid) {
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("receive [").append(messageType).append("] with id [").append(messageId).append("] ");
if (from != null)
buf.append("from [").append(getName(from)).append("] ");
buf.append("expiring on [").append(getTime(expiration)).append("] valid? ").append(isValid);
addEntry(buf.toString());
if (messageType.equals("net.i2p.data.i2np.TunnelMessage")) {
//_log.warn("ReceiveMessage tunnel message ["+messageId+"]", new Exception("Receive tunnel"));
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("receive [").append(messageType).append("] with id [").append(messageId).append("] ");
if (from != null)
buf.append("from [").append(getName(from)).append("] ");
buf.append("expiring on [").append(getTime(expiration)).append("] valid? ").append(isValid);
addEntry(buf.toString());
if (messageType.equals("net.i2p.data.i2np.TunnelMessage")) {
//_log.warn("ReceiveMessage tunnel message ["+messageId+"]", new Exception("Receive tunnel"));
}
}
public void receiveMessage(String messageType, long messageId, Date expiration, boolean isValid) {
receiveMessage(messageType, messageId, expiration, null, isValid);
receiveMessage(messageType, messageId, expiration, null, isValid);
}
/**
@ -410,12 +387,12 @@ public class MessageHistory {
* @param containerMessageId the unique message id of the message
*/
public void wrap(String bodyMessageType, long bodyMessageId, String containerMessageType, long containerMessageId) {
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("Wrap message [").append(bodyMessageType).append("] id [").append(bodyMessageId).append("] ");
buf.append("in [").append(containerMessageType).append("] id [").append(containerMessageId).append("]");
addEntry(buf.toString());
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("Wrap message [").append(bodyMessageType).append("] id [").append(bodyMessageId).append("] ");
buf.append("in [").append(containerMessageType).append("] id [").append(containerMessageId).append("]");
addEntry(buf.toString());
}
/**
@ -423,11 +400,11 @@ public class MessageHistory {
*
*/
public void receivePayloadMessage(long messageId) {
if (!_doLog) return;
StringBuffer buf = new StringBuffer(64);
buf.append(getPrefix());
buf.append("Receive payload message [").append(messageId).append("]");
addEntry(buf.toString());
if (!_doLog) return;
StringBuffer buf = new StringBuffer(64);
buf.append(getPrefix());
buf.append("Receive payload message [").append(messageId).append("]");
addEntry(buf.toString());
}
/**
@ -438,11 +415,11 @@ public class MessageHistory {
* @param timeToSend how long it took to send the message
*/
public void sendPayloadMessage(long messageId, boolean successfullySent, long timeToSend) {
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("Send payload message in [").append(messageId).append("] in [").append(timeToSend).append("] successfully? ").append(successfullySent);
addEntry(buf.toString());
if (!_doLog) return;
StringBuffer buf = new StringBuffer(128);
buf.append(getPrefix());
buf.append("Send payload message in [").append(messageId).append("] in [").append(timeToSend).append("] successfully? ").append(successfullySent);
addEntry(buf.toString());
}
/**
@ -450,27 +427,27 @@ public class MessageHistory {
*
*/
private final static String getName(Hash router) {
if (router == null) return "unknown";
String str = router.toBase64();
if ( (str == null) || (str.length() < 6) ) return "invalid";
return str.substring(0, 6);
if (router == null) return "unknown";
String str = router.toBase64();
if ( (str == null) || (str.length() < 6) ) return "invalid";
return str.substring(0, 6);
}
private final String getPrefix() {
StringBuffer buf = new StringBuffer(48);
buf.append(getTime(new Date(Clock.getInstance().now())));
buf.append(' ').append(_localIdent).append(": ");
return buf.toString();
StringBuffer buf = new StringBuffer(48);
buf.append(getTime(new Date(_context.clock().now())));
buf.append(' ').append(_localIdent).append(": ");
return buf.toString();
}
private final static SimpleDateFormat _fmt = new SimpleDateFormat("yy/MM/dd.HH:mm:ss.SSS");
static {
_fmt.setTimeZone(TimeZone.getTimeZone("GMT"));
_fmt.setTimeZone(TimeZone.getTimeZone("GMT"));
}
private final static String getTime(Date when) {
synchronized (_fmt) {
return _fmt.format(when);
}
synchronized (_fmt) {
return _fmt.format(when);
}
}
/**
@ -479,27 +456,27 @@ public class MessageHistory {
*
*/
private void addEntry(String entry) {
if (entry == null) return;
int sz = 0;
synchronized (_unwrittenEntries) {
_unwrittenEntries.add(entry);
sz = _unwrittenEntries.size();
}
if (sz > FLUSH_SIZE)
flushEntries();
if (entry == null) return;
int sz = 0;
synchronized (_unwrittenEntries) {
_unwrittenEntries.add(entry);
sz = _unwrittenEntries.size();
}
if (sz > FLUSH_SIZE)
flushEntries();
}
/**
* Write out any unwritten entries, and clear the pending list
*/
private void flushEntries() {
if (_doPause) return;
List entries = null;
synchronized (_unwrittenEntries) {
entries = new LinkedList(_unwrittenEntries);
_unwrittenEntries.clear();
}
writeEntries(entries);
if (_doPause) return;
List entries = null;
synchronized (_unwrittenEntries) {
entries = new LinkedList(_unwrittenEntries);
_unwrittenEntries.clear();
}
writeEntries(entries);
}
/**
@ -507,41 +484,46 @@ public class MessageHistory {
*
*/
private void writeEntries(List entries) {
if (!_doLog) return;
FileOutputStream fos = null;
try {
fos = new FileOutputStream(_historyFile, true);
for (Iterator iter = entries.iterator(); iter.hasNext(); ) {
String entry = (String)iter.next();
fos.write(entry.getBytes());
fos.write(NL);
}
} catch (IOException ioe) {
_log.error("Error writing trace entries", ioe);
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
if (!_doLog) return;
FileOutputStream fos = null;
try {
fos = new FileOutputStream(_historyFile, true);
for (Iterator iter = entries.iterator(); iter.hasNext(); ) {
String entry = (String)iter.next();
fos.write(entry.getBytes());
fos.write(NL);
}
} catch (IOException ioe) {
_log.error("Error writing trace entries", ioe);
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
/** write out the message history once per minute, if not sooner */
private final static long WRITE_DELAY = 60*1000;
private static class WriteJob extends JobImpl {
public String getName() { return "Write History Entries"; }
public void runJob() {
MessageHistory.getInstance().flushEntries();
MessageHistory.getInstance().updateSettings();
requeue(WRITE_DELAY);
}
/** Job that flushes pending entries and refreshes settings, rescheduling itself every WRITE_DELAY ms. */
private class WriteJob extends JobImpl {
    public WriteJob() {
        super(MessageHistory.this._context);
    }
    public String getName() { return "Write History Entries"; }
    public void runJob() {
        flushEntries();
        updateSettings();
        // requeue so the history is written out at least once per WRITE_DELAY
        requeue(WRITE_DELAY);
    }
}
public static void main(String args[]) {
MessageHistory hist = new MessageHistory(new Hash(new byte[32]), "messageHistory.txt");
MessageHistory.getInstance().setDoLog(false);
hist.addEntry("you smell before");
hist.getInstance().setDoLog(true);
hist.addEntry("you smell after");
hist.getInstance().setDoLog(false);
hist.addEntry("you smell finished");
hist.flushEntries();
RouterContext ctx = new RouterContext(null);
MessageHistory hist = new MessageHistory(ctx);
//, new Hash(new byte[32]), "messageHistory.txt");
hist.setDoLog(false);
hist.addEntry("you smell before");
hist.setDoLog(true);
hist.addEntry("you smell after");
hist.setDoLog(false);
hist.addEntry("you smell finished");
hist.flushEntries();
}
}

View File

@ -17,21 +17,29 @@ import net.i2p.util.Log;
*
*/
public class MessageValidator {
private final static Log _log = new Log(MessageValidator.class);
private final static MessageValidator _instance = new MessageValidator();
public final static MessageValidator getInstance() { return _instance; }
/**
private Log _log;
private RouterContext _context;
/**
* Expiration date (as a Long) to message id (as a Long).
* The expiration date (key) must be unique, so on collision, increment the value.
* This keeps messageIds around longer than they need to be, but hopefully not by much ;)
*
*/
private TreeMap _receivedIdExpirations = new TreeMap();
private TreeMap _receivedIdExpirations;
/** Message id (as a Long) */
private Set _receivedIds = new HashSet(1024);
private Set _receivedIds;
/** synchronize on this before adjusting the received id data */
private Object _receivedIdLock = new Object();
private Object _receivedIdLock;
/**
 * Build a validator scoped to the given router context.
 *
 * @param context source of the clock and log manager used for validation
 */
public MessageValidator(RouterContext context) {
    _log = context.logManager().getLog(MessageValidator.class);
    _receivedIdExpirations = new TreeMap();
    _receivedIds = new HashSet(1024);
    _receivedIdLock = new Object();
    _context = context;
}
/**
* Determine if this message should be accepted as valid (not expired, not a duplicate)
@ -39,88 +47,87 @@ public class MessageValidator {
* @return true if the message should be accepted as valid, false otherwise
*/
public boolean validateMessage(long messageId, long expiration) {
long now = Clock.getInstance().now();
if (now - Router.CLOCK_FUDGE_FACTOR >= expiration) {
if (_log.shouldLog(Log.WARN))
_log.warn("Rejecting message " + messageId + " because it expired " + (now-expiration) + "ms ago");
return false;
}
boolean isDuplicate = noteReception(messageId, expiration);
if (isDuplicate) {
if (_log.shouldLog(Log.WARN))
_log.warn("Rejecting message " + messageId + " because it is a duplicate", new Exception("Duplicate origin"));
return false;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Accepting message " + messageId + " because it is NOT a duplicate", new Exception("Original origin"));
return true;
}
long now = _context.clock().now();
if (now - Router.CLOCK_FUDGE_FACTOR >= expiration) {
if (_log.shouldLog(Log.WARN))
_log.warn("Rejecting message " + messageId + " because it expired " + (now-expiration) + "ms ago");
return false;
}
boolean isDuplicate = noteReception(messageId, expiration);
if (isDuplicate) {
if (_log.shouldLog(Log.WARN))
_log.warn("Rejecting message " + messageId + " because it is a duplicate", new Exception("Duplicate origin"));
return false;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Accepting message " + messageId + " because it is NOT a duplicate", new Exception("Original origin"));
return true;
}
}
/**
* Note that we've received the message (which has the expiration given).
* This functionality will need to be reworked for I2P 3.0 when we take into
* This functionality will need to be reworked for I2P 3.0 when we take into
* consideration messages with significant user specified delays (since we dont
* want to keep an infinite number of messages in RAM, etc)
*
* @return true if we HAVE already seen this message, false if not
*/
private boolean noteReception(long messageId, long messageExpiration) {
Long id = new Long(messageId);
synchronized (_receivedIdLock) {
locked_cleanReceivedIds(Clock.getInstance().now() - Router.CLOCK_FUDGE_FACTOR);
if (_receivedIds.contains(id)) {
return true;
} else {
long date = messageExpiration;
while (_receivedIdExpirations.containsKey(new Long(date)))
date++;
_receivedIdExpirations.put(new Long(date), id);
_receivedIds.add(id);
return false;
}
}
Long id = new Long(messageId);
synchronized (_receivedIdLock) {
locked_cleanReceivedIds(_context.clock().now() - Router.CLOCK_FUDGE_FACTOR);
if (_receivedIds.contains(id)) {
return true;
} else {
long date = messageExpiration;
while (_receivedIdExpirations.containsKey(new Long(date)))
date++;
_receivedIdExpirations.put(new Long(date), id);
_receivedIds.add(id);
return false;
}
}
}
/**
* Clean the ids that we no longer need to keep track of to prevent replay
* Clean the ids that we no longer need to keep track of to prevent replay
* attacks.
*
*/
*/
private void cleanReceivedIds() {
long now = Clock.getInstance().now() - Router.CLOCK_FUDGE_FACTOR ;
synchronized (_receivedIdLock) {
locked_cleanReceivedIds(now);
}
long now = _context.clock().now() - Router.CLOCK_FUDGE_FACTOR ;
synchronized (_receivedIdLock) {
locked_cleanReceivedIds(now);
}
}
/**
* Clean the ids that we no longer need to keep track of to prevent replay
* Clean the ids that we no longer need to keep track of to prevent replay
* attacks - only call this from within a block synchronized on the received ID lock.
*
*/
private void locked_cleanReceivedIds(long now) {
Set toRemoveIds = new HashSet(4);
Set toRemoveDates = new HashSet(4);
for (Iterator iter = _receivedIdExpirations.keySet().iterator(); iter.hasNext(); ) {
Long date = (Long)iter.next();
if (date.longValue() <= now) {
// no need to keep track of things in the past
toRemoveDates.add(date);
toRemoveIds.add(_receivedIdExpirations.get(date));
} else {
// the expiration is in the future, we still need to keep track of
// it to prevent replays
break;
}
}
for (Iterator iter = toRemoveDates.iterator(); iter.hasNext(); )
_receivedIdExpirations.remove(iter.next());
for (Iterator iter = toRemoveIds.iterator(); iter.hasNext(); )
_receivedIds.remove(iter.next());
if (_log.shouldLog(Log.INFO))
_log.info("Cleaned out " + toRemoveDates.size() + " expired messageIds, leaving " + _receivedIds.size() + " remaining");
Set toRemoveIds = new HashSet(4);
Set toRemoveDates = new HashSet(4);
for (Iterator iter = _receivedIdExpirations.keySet().iterator(); iter.hasNext(); ) {
Long date = (Long)iter.next();
if (date.longValue() <= now) {
// no need to keep track of things in the past
toRemoveDates.add(date);
toRemoveIds.add(_receivedIdExpirations.get(date));
} else {
// the expiration is in the future, we still need to keep track of
// it to prevent replays
break;
}
}
for (Iterator iter = toRemoveDates.iterator(); iter.hasNext(); )
_receivedIdExpirations.remove(iter.next());
for (Iterator iter = toRemoveIds.iterator(); iter.hasNext(); )
_receivedIds.remove(iter.next());
if (_log.shouldLog(Log.INFO))
_log.info("Cleaned out " + toRemoveDates.size() + " expired messageIds, leaving " + _receivedIds.size() + " remaining");
}
}

View File

@ -23,9 +23,6 @@ import net.i2p.router.networkdb.kademlia.KademliaNetworkDatabaseFacade;
*
*/
public abstract class NetworkDatabaseFacade implements Service {
private static NetworkDatabaseFacade _instance = new KademliaNetworkDatabaseFacade(); // NetworkDatabaseFacadeImpl();
public static NetworkDatabaseFacade getInstance() { return _instance; }
/**
* Return the RouterInfo structures for the routers closest to the given key.
* At most maxNumRouters will be returned
@ -54,33 +51,35 @@ public abstract class NetworkDatabaseFacade implements Service {
class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade {
private Map _routers;
private RouterContext _context;
public DummyNetworkDatabaseFacade() {
_routers = new HashMap();
public DummyNetworkDatabaseFacade(RouterContext ctx) {
_routers = new HashMap();
_context = ctx;
}
public void shutdown() {}
public void startup() {
RouterInfo info = Router.getInstance().getRouterInfo();
_routers.put(info.getIdentity().getHash(), info);
RouterInfo info = _context.router().getRouterInfo();
_routers.put(info.getIdentity().getHash(), info);
}
public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {}
public LeaseSet lookupLeaseSetLocally(Hash key) { return null; }
public void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {
RouterInfo info = lookupRouterInfoLocally(key);
if (info == null)
JobQueue.getInstance().addJob(onFailedLookupJob);
else
JobQueue.getInstance().addJob(onFindJob);
RouterInfo info = lookupRouterInfoLocally(key);
if (info == null)
_context.jobQueue().addJob(onFailedLookupJob);
else
_context.jobQueue().addJob(onFindJob);
}
public RouterInfo lookupRouterInfoLocally(Hash key) { return (RouterInfo)_routers.get(key); }
public void publish(LeaseSet localLeaseSet) {}
public void publish(RouterInfo localRouterInfo) {}
public LeaseSet store(Hash key, LeaseSet leaseSet) { return leaseSet; }
public RouterInfo store(Hash key, RouterInfo routerInfo) {
_routers.put(key, routerInfo);
return routerInfo;
_routers.put(key, routerInfo);
return routerInfo;
}
public void unpublish(LeaseSet localLeaseSet) {}
public void fail(Hash dbEntry) {}

View File

@ -1,9 +1,9 @@
package net.i2p.router;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -32,7 +32,8 @@ import net.i2p.util.Log;
*
*/
public class OutNetMessage {
private final static Log _log = new Log(OutNetMessage.class);
private Log _log;
private RouterContext _context;
private RouterInfo _target;
private I2NPMessage _message;
private long _messageSize;
@ -49,53 +50,55 @@ public class OutNetMessage {
private long _created;
/** for debugging, contains a mapping of even name to Long (e.g. "begin sending", "handleOutbound", etc) */
private HashMap _timestamps;
/**
* contains a list of timestamp event names in the order they were fired
* (some JVMs have less than 10ms resolution, so the Long above doesn't guarantee order)
/**
* contains a list of timestamp event names in the order they were fired
* (some JVMs have less than 10ms resolution, so the Long above doesn't guarantee order)
*/
private List _timestampOrder;
public OutNetMessage() {
setTarget(null);
_message = null;
_messageSize = 0;
setPriority(-1);
setExpiration(-1);
setOnSendJob(null);
setOnFailedSendJob(null);
setOnReplyJob(null);
setOnFailedReplyJob(null);
setReplySelector(null);
_timestamps = new HashMap(8);
_timestampOrder = new LinkedList();
_failedTransports = new HashSet();
_sendBegin = 0;
_createdBy = new Exception("Created by");
_created = Clock.getInstance().now();
timestamp("Created");
public OutNetMessage(RouterContext context) {
_context = context;
_log = context.logManager().getLog(OutNetMessage.class);
setTarget(null);
_message = null;
_messageSize = 0;
setPriority(-1);
setExpiration(-1);
setOnSendJob(null);
setOnFailedSendJob(null);
setOnReplyJob(null);
setOnFailedReplyJob(null);
setReplySelector(null);
_timestamps = new HashMap(8);
_timestampOrder = new LinkedList();
_failedTransports = new HashSet();
_sendBegin = 0;
_createdBy = new Exception("Created by");
_created = context.clock().now();
timestamp("Created");
}
public void timestamp(String eventName) {
synchronized (_timestamps) {
_timestamps.put(eventName, new Long(Clock.getInstance().now()));
_timestampOrder.add(eventName);
}
synchronized (_timestamps) {
_timestamps.put(eventName, new Long(_context.clock().now()));
_timestampOrder.add(eventName);
}
}
public Map getTimestamps() {
synchronized (_timestamps) {
return (Map)_timestamps.clone();
}
synchronized (_timestamps) {
return (Map)_timestamps.clone();
}
}
public Long getTimestamp(String eventName) {
synchronized (_timestamps) {
return (Long)_timestamps.get(eventName);
}
synchronized (_timestamps) {
return (Long)_timestamps.get(eventName);
}
}
public Exception getCreatedBy() { return _createdBy; }
/**
* Specifies the router to which the message should be delivered.
* Specifies the router to which the message should be delivered.
*
*/
public RouterInfo getTarget() { return _target; }
@ -105,48 +108,48 @@ public class OutNetMessage {
*
*/
public I2NPMessage getMessage() { return _message; }
public void setMessage(I2NPMessage msg) {
_message = msg;
public void setMessage(I2NPMessage msg) {
_message = msg;
}
public long getMessageSize() {
if (_messageSize <= 0) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); // large enough to hold most messages
_message.writeBytes(baos);
long sz = baos.size();
baos.reset();
_messageSize = sz;
} catch (DataFormatException dfe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", dfe);
} catch (IOException ioe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", ioe);
}
}
return _messageSize;
public long getMessageSize() {
if (_messageSize <= 0) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); // large enough to hold most messages
_message.writeBytes(baos);
long sz = baos.size();
baos.reset();
_messageSize = sz;
} catch (DataFormatException dfe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", dfe);
} catch (IOException ioe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", ioe);
}
}
return _messageSize;
}
public byte[] getMessageData() {
if (_message == null) {
return null;
} else {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); // large enough to hold most messages
_message.writeBytes(baos);
byte data[] = baos.toByteArray();
baos.reset();
return data;
} catch (DataFormatException dfe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", dfe);
} catch (IOException ioe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", ioe);
}
return null;
}
public byte[] getMessageData() {
if (_message == null) {
return null;
} else {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); // large enough to hold most messages
_message.writeBytes(baos);
byte data[] = baos.toByteArray();
baos.reset();
return data;
} catch (DataFormatException dfe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", dfe);
} catch (IOException ioe) {
_log.error("Error serializing the I2NPMessage for the OutNetMessage", ioe);
}
return null;
}
}
/**
* Specify the priority of the message, where higher numbers are higher
* priority. Higher priority messages should be delivered before lower
* priority. Higher priority messages should be delivered before lower
* priority ones, though some algorithm may be used to avoid starvation.
*
*/
@ -154,15 +157,15 @@ public class OutNetMessage {
public void setPriority(int priority) { _priority = priority; }
/**
* Specify the # ms since the epoch after which if the message has not been
* sent the OnFailedSend job should be fired and the message should be
* removed from the pool. If the message has already been sent, this
* sent the OnFailedSend job should be fired and the message should be
* removed from the pool. If the message has already been sent, this
* expiration is ignored and the expiration from the ReplySelector is used.
*
*/
public long getExpiration() { return _expiration; }
public void setExpiration(long expiration) { _expiration = expiration; }
/**
* After the message is successfully passed to the router specified, the
* After the message is successfully passed to the router specified, the
* given job is enqueued.
*
*/
@ -199,74 +202,74 @@ public class OutNetMessage {
/** when did the sending process begin */
public long getSendBegin() { return _sendBegin; }
public void beginSend() { _sendBegin = Clock.getInstance().now(); }
public void beginSend() { _sendBegin = _context.clock().now(); }
public long getCreated() { return _created; }
public long getLifetime() { return Clock.getInstance().now() - _created; }
public long getLifetime() { return _context.clock().now() - _created; }
public String toString() {
StringBuffer buf = new StringBuffer(128);
buf.append("[OutNetMessage contains ");
if (_message == null) {
buf.append("*no message*");
} else {
buf.append("a ").append(_messageSize).append(" byte ");
buf.append(_message.getClass().getName());
}
buf.append(" expiring on ").append(new Date(_expiration));
buf.append(" failed delivery on transports ").append(_failedTransports);
if (_target == null)
buf.append(" targetting no one in particular...");
else
buf.append(" targetting ").append(_target.getIdentity().getHash().toBase64());
if (_onReply != null)
buf.append(" with onReply job: ").append(_onReply);
if (_onSend != null)
buf.append(" with onSend job: ").append(_onSend);
if (_onFailedReply != null)
buf.append(" with onFailedReply job: ").append(_onFailedReply);
if (_onFailedSend != null)
buf.append(" with onFailedSend job: ").append(_onFailedSend);
buf.append(" {timestamps: \n");
synchronized (_timestamps) {
long lastWhen = -1;
for (int i = 0; i < _timestampOrder.size(); i++) {
String name = (String)_timestampOrder.get(i);
Long when = (Long)_timestamps.get(name);
buf.append("\t[");
long diff = when.longValue() - lastWhen;
if ( (lastWhen > 0) && (diff > 500) )
buf.append("**");
if (lastWhen > 0)
buf.append(diff);
else
buf.append(0);
buf.append("ms: \t").append(name).append('=').append(formatDate(when.longValue())).append("]\n");
lastWhen = when.longValue();
}
}
buf.append("}");
buf.append("]");
return buf.toString();
StringBuffer buf = new StringBuffer(128);
buf.append("[OutNetMessage contains ");
if (_message == null) {
buf.append("*no message*");
} else {
buf.append("a ").append(_messageSize).append(" byte ");
buf.append(_message.getClass().getName());
}
buf.append(" expiring on ").append(new Date(_expiration));
buf.append(" failed delivery on transports ").append(_failedTransports);
if (_target == null)
buf.append(" targetting no one in particular...");
else
buf.append(" targetting ").append(_target.getIdentity().getHash().toBase64());
if (_onReply != null)
buf.append(" with onReply job: ").append(_onReply);
if (_onSend != null)
buf.append(" with onSend job: ").append(_onSend);
if (_onFailedReply != null)
buf.append(" with onFailedReply job: ").append(_onFailedReply);
if (_onFailedSend != null)
buf.append(" with onFailedSend job: ").append(_onFailedSend);
buf.append(" {timestamps: \n");
synchronized (_timestamps) {
long lastWhen = -1;
for (int i = 0; i < _timestampOrder.size(); i++) {
String name = (String)_timestampOrder.get(i);
Long when = (Long)_timestamps.get(name);
buf.append("\t[");
long diff = when.longValue() - lastWhen;
if ( (lastWhen > 0) && (diff > 500) )
buf.append("**");
if (lastWhen > 0)
buf.append(diff);
else
buf.append(0);
buf.append("ms: \t").append(name).append('=').append(formatDate(when.longValue())).append("]\n");
lastWhen = when.longValue();
}
}
buf.append("}");
buf.append("]");
return buf.toString();
}
private final static SimpleDateFormat _fmt = new SimpleDateFormat("HH:mm:ss.SSS");
private final static String formatDate(long when) {
Date d = new Date(when);
synchronized (_fmt) {
return _fmt.format(d);
}
Date d = new Date(when);
synchronized (_fmt) {
return _fmt.format(d);
}
}
public int hashCode() {
int rv = 0;
rv += DataHelper.hashCode(_message);
rv += DataHelper.hashCode(_target);
// the others are pretty much inconsequential
return rv;
int rv = 0;
rv += DataHelper.hashCode(_message);
rv += DataHelper.hashCode(_target);
// the others are pretty much inconsequential
return rv;
}
public boolean equals(Object obj) {
return obj == this; // two OutNetMessages are different even if they contain the same message
return obj == this; // two OutNetMessages are different even if they contain the same message
}
}

View File

@ -26,13 +26,12 @@ import net.i2p.util.Log;
*
*/
public class OutNetMessagePool {
private final static Log _log = new Log(OutNetMessagePool.class);
private static OutNetMessagePool _instance = new OutNetMessagePool();
public static OutNetMessagePool getInstance() { return _instance; }
private TreeMap _messageLists; // priority --> List of OutNetMessage objects, where HIGHEST priority first
private Log _log;
private RouterContext _context;
private OutNetMessagePool() {
_messageLists = new TreeMap(new ReverseIntegerComparator());
public OutNetMessagePool(RouterContext context) {
_context = context;
_log = _context.logManager().getLog(OutNetMessagePool.class);
}
/**
@ -40,19 +39,7 @@ public class OutNetMessagePool {
*
*/
public OutNetMessage getNext() {
synchronized (_messageLists) {
if (_messageLists.size() <= 0) return null;
for (Iterator iter = _messageLists.keySet().iterator(); iter.hasNext(); ) {
Integer priority = (Integer)iter.next();
List messages = (List)_messageLists.get(priority);
if (messages.size() > 0) {
_log.debug("Found a message of priority " + priority);
return (OutNetMessage)messages.remove(0);
}
}
// no messages of any priority
return null;
}
return null;
}
/**
@ -60,45 +47,35 @@ public class OutNetMessagePool {
*
*/
public void add(OutNetMessage msg) {
boolean valid = validate(msg);
if (!valid) return;
if (true) { // skip the pool
MessageSelector selector = msg.getReplySelector();
if (selector != null) {
OutboundMessageRegistry.getInstance().registerPending(msg);
}
CommSystemFacade.getInstance().processMessage(msg);
return;
}
synchronized (_messageLists) {
Integer pri = new Integer(msg.getPriority());
if ( (_messageLists.size() <= 0) || (!_messageLists.containsKey(pri)) )
_messageLists.put(new Integer(msg.getPriority()), new ArrayList(32));
List messages = (List)_messageLists.get(pri);
messages.add(msg);
}
boolean valid = validate(msg);
if (!valid) return;
MessageSelector selector = msg.getReplySelector();
if (selector != null) {
_context.messageRegistry().registerPending(msg);
}
_context.commSystem().processMessage(msg);
return;
}
private boolean validate(OutNetMessage msg) {
if (msg == null) return false;
if (msg.getMessage() == null) {
_log.error("Null message in the OutNetMessage: " + msg, new Exception("Someone fucked up"));
return false;
}
if (msg.getTarget() == null) {
_log.error("No target in the OutNetMessage: " + msg, new Exception("Definitely a fuckup"));
return false;
}
if (msg.getPriority() < 0) {
_log.warn("Priority less than 0? sounds like nonsense to me... " + msg, new Exception("Negative priority"));
return false;
}
if (msg.getExpiration() <= Clock.getInstance().now()) {
_log.error("Already expired! wtf: " + msg, new Exception("Expired message"));
return false;
}
return true;
if (msg == null) return false;
if (msg.getMessage() == null) {
_log.error("Null message in the OutNetMessage: " + msg, new Exception("Someone fucked up"));
return false;
}
if (msg.getTarget() == null) {
_log.error("No target in the OutNetMessage: " + msg, new Exception("Definitely a fuckup"));
return false;
}
if (msg.getPriority() < 0) {
_log.warn("Priority less than 0? sounds like nonsense to me... " + msg, new Exception("Negative priority"));
return false;
}
if (msg.getExpiration() <= _context.clock().now()) {
_log.error("Already expired! wtf: " + msg, new Exception("Expired message"));
return false;
}
return true;
}
/**
@ -106,43 +83,14 @@ public class OutNetMessagePool {
*
*/
public void clearExpired() {
long now = Clock.getInstance().now();
List jobsToEnqueue = new ArrayList();
synchronized (_messageLists) {
for (Iterator iter = _messageLists.values().iterator(); iter.hasNext();) {
List toRemove = new ArrayList();
List messages = (List)iter.next();
for (Iterator msgIter = messages.iterator(); msgIter.hasNext(); ) {
OutNetMessage msg = (OutNetMessage)msgIter.next();
if (msg.getExpiration() <= now) {
_log.warn("Outbound network message expired: " + msg);
toRemove.add(msg);
jobsToEnqueue.add(msg.getOnFailedSendJob());
}
}
messages.removeAll(toRemove);
}
}
for (int i = 0; i < jobsToEnqueue.size(); i++) {
Job j = (Job)jobsToEnqueue.get(i);
JobQueue.getInstance().addJob(j);
}
// noop
}
/**
* Retrieve the number of messages, regardless of priority.
*
*/
public int getCount() {
int size = 0;
synchronized (_messageLists) {
for (Iterator iter = _messageLists.values().iterator(); iter.hasNext(); ) {
List lst = (List)iter.next();
size += lst.size();
}
}
return size;
}
public int getCount() { return 0; }
/**
* Retrieve the number of messages at the given priority. This can be used for
@ -150,45 +98,17 @@ public class OutNetMessagePool {
* where all of these 'spare' messages are of the same priority.
*
*/
public int getCount(int priority) {
synchronized (_messageLists) {
Integer pri = new Integer(priority);
List messages = (List)_messageLists.get(pri);
if (messages == null)
return 0;
else
return messages.size();
}
}
public int getCount(int priority) { return 0; }
public void dumpPoolInfo() {
StringBuffer buf = new StringBuffer();
buf.append("\nDumping Outbound Network Message Pool. Total # message: ").append(getCount()).append("\n");
synchronized (_messageLists) {
for (Iterator iter = _messageLists.keySet().iterator(); iter.hasNext();) {
Integer pri = (Integer)iter.next();
List messages = (List)_messageLists.get(pri);
if (messages.size() > 0) {
buf.append("Messages of priority ").append(pri).append(": ").append(messages.size()).append("\n");
buf.append("---------------------------\n");
for (Iterator msgIter = messages.iterator(); msgIter.hasNext(); ) {
OutNetMessage msg = (OutNetMessage)msgIter.next();
buf.append("Message ").append(msg.getMessage()).append("\n\n");
}
buf.append("---------------------------\n");
}
}
}
_log.debug(buf.toString());
}
public void dumpPoolInfo() { return; }
private static class ReverseIntegerComparator implements Comparator {
public int compare(Object lhs, Object rhs) {
if ( (lhs == null) || (rhs == null) ) return 0; // invalid, but never used
if ( !(lhs instanceof Integer) || !(rhs instanceof Integer)) return 0;
Integer lv = (Integer)lhs;
Integer rv = (Integer)rhs;
return - (lv.compareTo(rv));
}
public int compare(Object lhs, Object rhs) {
if ( (lhs == null) || (rhs == null) ) return 0; // invalid, but never used
if ( !(lhs instanceof Integer) || !(rhs instanceof Integer)) return 0;
Integer lv = (Integer)lhs;
Integer rv = (Integer)rhs;
return - (lv.compareTo(rv));
}
}
}

View File

@ -18,23 +18,19 @@ import net.i2p.router.peermanager.PeerManagerFacadeImpl;
* includes periodically queueing up outbound messages to the peers to test them.
*
*/
public abstract class PeerManagerFacade implements Service {
private static PeerManagerFacade _instance = new PeerManagerFacadeImpl();
public static PeerManagerFacade getInstance() { return _instance; }
public interface PeerManagerFacade extends Service {
/**
* Select peers from the manager's existing routing tables according to
* the specified criteria. This call DOES block.
*
* @return List of Hash objects of the RouterIdentity for matching peers
*/
public abstract List selectPeers(PeerSelectionCriteria criteria);
public String renderStatusHTML() { return ""; }
public List selectPeers(PeerSelectionCriteria criteria);
}
class DummyPeerManagerFacade extends PeerManagerFacade {
class DummyPeerManagerFacade implements PeerManagerFacade {
public void shutdown() {}
public void startup() {}
public String renderStatusHTML() { return ""; }
public List selectPeers(PeerSelectionCriteria criteria) { return null; }
}

View File

@ -11,71 +11,64 @@ package net.i2p.router;
import java.util.Properties;
import net.i2p.data.Hash;
import net.i2p.router.peermanager.ProfileManagerImpl;
public abstract class ProfileManager {
private final static ProfileManager _instance = new ProfileManagerImpl();
public static ProfileManager getInstance() { return _instance; }
/** is this peer failing or already dropped? */
public abstract boolean isFailing(Hash peer);
public interface ProfileManager {
/**
* Note that it took msToSend to send a message of size bytesSent to the peer over the transport.
* This should only be called if the transport considered the send successful.
*
*/
public abstract void messageSent(Hash peer, String transport, long msToSend, long bytesSent);
void messageSent(Hash peer, String transport, long msToSend, long bytesSent);
/**
* Note that the router failed to send a message to the peer over the transport specified
*
*/
public abstract void messageFailed(Hash peer, String transport);
void messageFailed(Hash peer, String transport);
/**
* Note that the router failed to send a message to the peer over any transport
*
*/
public abstract void messageFailed(Hash peer);
void messageFailed(Hash peer);
/**
* Note that there was some sort of communication error talking with the peer
*
*/
public abstract void commErrorOccurred(Hash peer);
void commErrorOccurred(Hash peer);
/**
* Note that the router agreed to participate in a tunnel
*
*/
public abstract void tunnelJoined(Hash peer, long responseTimeMs);
void tunnelJoined(Hash peer, long responseTimeMs);
/**
* Note that a router explicitly rejected joining a tunnel
*
*/
public abstract void tunnelRejected(Hash peer, long responseTimeMs);
void tunnelRejected(Hash peer, long responseTimeMs);
/**
* Note that the peer participated in a tunnel that failed. Its failure may not have
* been the peer's fault however.
*
*/
public abstract void tunnelFailed(Hash peer);
void tunnelFailed(Hash peer);
/**
* Note that the peer was able to return the valid data for a db lookup
*
*/
public abstract void dbLookupSuccessful(Hash peer, long responseTimeMs);
void dbLookupSuccessful(Hash peer, long responseTimeMs);
/**
* Note that the peer was unable to reply to a db lookup - either with data or with
* a lookupReply redirecting the user elsewhere
*
*/
public abstract void dbLookupFailed(Hash peer);
void dbLookupFailed(Hash peer);
/**
* Note that the peer replied to a db lookup with a redirect to other routers, where
@ -85,39 +78,39 @@ public abstract class ProfileManager {
* asked them not to send us, but they did anyway
*
*/
public abstract void dbLookupReply(Hash peer, int newPeers, int oldPeers, int invalid, int duplicate, long responseTimeMs);
void dbLookupReply(Hash peer, int newPeers, int oldPeers, int invalid, int duplicate, long responseTimeMs);
/**
* Note that the local router received a db lookup from the given peer
*
*/
public abstract void dbLookupReceived(Hash peer);
void dbLookupReceived(Hash peer);
/**
* Note that the local router received an unprompted db store from the given peer
*
*/
public abstract void dbStoreReceived(Hash peer, boolean wasNewKey);
void dbStoreReceived(Hash peer, boolean wasNewKey);
/**
* Note that we've confirmed a successful send of db data to the peer (though we haven't
* necessarily requested it again from them, so they /might/ be lying)
*
*/
public abstract void dbStoreSent(Hash peer, long responseTimeMs);
void dbStoreSent(Hash peer, long responseTimeMs);
/**
* Note that we were unable to confirm a successful send of db data to
* the peer, at least not within our timeout period
*
*/
public abstract void dbStoreFailed(Hash peer);
void dbStoreFailed(Hash peer);
/**
* Note that the local router received a reference to the given peer, either
* through an explicit dbStore or in a dbLookupReply
*/
public abstract void heardAbout(Hash peer);
void heardAbout(Hash peer);
/**
* Note that the router received a message from the given peer on the specified
@ -126,8 +119,8 @@ public abstract class ProfileManager {
* available
*
*/
public abstract void messageReceived(Hash peer, String style, long msToReceive, int bytesRead);
void messageReceived(Hash peer, String style, long msToReceive, int bytesRead);
/** provide a simple summary of a number of peers, suitable for publication in the netDb */
public abstract Properties summarizePeers(int numPeers);
Properties summarizePeers(int numPeers);
}

View File

@ -54,14 +54,14 @@ import net.i2p.util.RandomSource;
*
*/
public class Router {
private final static Log _log = new Log(Router.class);
private final static Router _instance = new Router();
public static Router getInstance() { return _instance; }
private Log _log;
private RouterContext _context;
private Properties _config;
private String _configFilename;
private RouterInfo _routerInfo;
private long _started;
private boolean _higherVersionSeen;
private SessionKeyPersistenceHelper _sessionKeyPersistenceHelper;
public final static String PROP_CONFIG_FILE = "router.configLocation";
@ -73,16 +73,19 @@ public class Router {
public final static String PROP_KEYS_FILENAME = "router.keys.location";
public final static String PROP_KEYS_FILENAME_DEFAULT = "router.keys";
private Router() {
_config = new Properties();
_configFilename = System.getProperty(PROP_CONFIG_FILE, "router.config");
_routerInfo = null;
_higherVersionSeen = false;
public Router() {
// grumble about sun's java caching DNS entries *forever*
System.setProperty("sun.net.inetaddr.ttl", "0");
System.setProperty("networkaddress.cache.ttl", "0");
// (no need for keepalive)
System.setProperty("http.keepAlive", "false");
_config = new Properties();
_context = new RouterContext(this);
_configFilename = _context.getProperty(PROP_CONFIG_FILE, "router.config");
_routerInfo = null;
_higherVersionSeen = false;
_log = _context.logManager().getLog(Router.class);
_sessionKeyPersistenceHelper = new SessionKeyPersistenceHelper(_context);
}
public String getConfigFilename() { return _configFilename; }
@ -97,7 +100,7 @@ public class Router {
public void setRouterInfo(RouterInfo info) {
_routerInfo = info;
if (info != null)
JobQueue.getInstance().addJob(new PersistRouterInfoJob());
_context.jobQueue().addJob(new PersistRouterInfoJob());
}
/**
@ -110,10 +113,10 @@ public class Router {
public long getWhenStarted() { return _started; }
/** wall clock uptime */
public long getUptime() { return Clock.getInstance().now() - Clock.getInstance().getOffset() - _started; }
public long getUptime() { return _context.clock().now() - _context.clock().getOffset() - _started; }
private void runRouter() {
_started = Clock.getInstance().now();
_started = _context.clock().now();
Runtime.getRuntime().addShutdownHook(new ShutdownHook());
I2PThread.setOOMEventListener(new I2PThread.OOMEventListener() {
public void outOfMemory(OutOfMemoryError oom) {
@ -123,21 +126,22 @@ public class Router {
});
setupHandlers();
startupQueue();
JobQueue.getInstance().addJob(new CoallesceStatsJob());
JobQueue.getInstance().addJob(new UpdateRoutingKeyModifierJob());
_context.jobQueue().addJob(new CoallesceStatsJob());
_context.jobQueue().addJob(new UpdateRoutingKeyModifierJob());
warmupCrypto();
SessionKeyPersistenceHelper.getInstance().startup();
JobQueue.getInstance().addJob(new StartupJob());
_sessionKeyPersistenceHelper.startup();
_context.jobQueue().addJob(new StartupJob(_context));
}
/**
* coallesce the stats framework every minute
*
*/
private final static class CoallesceStatsJob extends JobImpl {
private final class CoallesceStatsJob extends JobImpl {
public CoallesceStatsJob() { super(Router.this._context); }
public String getName() { return "Coallesce stats"; }
public void runJob() {
StatManager.getInstance().coallesceStats();
Router.this._context.statManager().coallesceStats();
requeue(60*1000);
}
}
@ -147,15 +151,16 @@ public class Router {
* This is done here because we want to make sure the key is updated before anyone
* uses it.
*/
private final static class UpdateRoutingKeyModifierJob extends JobImpl {
private final class UpdateRoutingKeyModifierJob extends JobImpl {
private Calendar _cal = new GregorianCalendar(TimeZone.getTimeZone("GMT"));
public UpdateRoutingKeyModifierJob() { super(Router.this._context); }
public String getName() { return "Update Routing Key Modifier"; }
public void runJob() {
RoutingKeyGenerator.getInstance().generateDateBasedModData();
Router.this._context.routingKeyGenerator().generateDateBasedModData();
requeue(getTimeTillMidnight());
}
private long getTimeTillMidnight() {
long now = Clock.getInstance().now();
long now = Router.this._context.clock().now();
_cal.setTime(new Date(now));
_cal.add(Calendar.DATE, 1);
_cal.set(Calendar.HOUR_OF_DAY, 0);
@ -175,18 +180,18 @@ public class Router {
}
private void warmupCrypto() {
RandomSource.getInstance().nextBoolean();
_context.random().nextBoolean();
new DHSessionKeyBuilder(); // load the class so it starts the precalc process
}
private void startupQueue() {
JobQueue.getInstance().runQueue(1);
_context.jobQueue().runQueue(1);
}
private void setupHandlers() {
InNetMessagePool.getInstance().registerHandlerJobBuilder(GarlicMessage.MESSAGE_TYPE, new GarlicMessageHandler());
InNetMessagePool.getInstance().registerHandlerJobBuilder(TunnelMessage.MESSAGE_TYPE, new TunnelMessageHandler());
InNetMessagePool.getInstance().registerHandlerJobBuilder(SourceRouteReplyMessage.MESSAGE_TYPE, new SourceRouteReplyMessageHandler());
_context.inNetMessagePool().registerHandlerJobBuilder(GarlicMessage.MESSAGE_TYPE, new GarlicMessageHandler(_context));
_context.inNetMessagePool().registerHandlerJobBuilder(TunnelMessage.MESSAGE_TYPE, new TunnelMessageHandler(_context));
_context.inNetMessagePool().registerHandlerJobBuilder(SourceRouteReplyMessage.MESSAGE_TYPE, new SourceRouteReplyMessageHandler(_context));
}
public String renderStatusHTML() {
@ -214,9 +219,9 @@ public class Router {
if ( (_routerInfo != null) && (_routerInfo.getIdentity() != null) )
buf.append("<b>Router: </b> ").append(_routerInfo.getIdentity().getHash().toBase64()).append("<br />\n");
buf.append("<b>As of: </b> ").append(new Date(Clock.getInstance().now())).append(" (uptime: ").append(DataHelper.formatDuration(getUptime())).append(") <br />\n");
buf.append("<b>As of: </b> ").append(new Date(_context.clock().now())).append(" (uptime: ").append(DataHelper.formatDuration(getUptime())).append(") <br />\n");
buf.append("<b>Started on: </b> ").append(new Date(getWhenStarted())).append("<br />\n");
buf.append("<b>Clock offset: </b> ").append(Clock.getInstance().getOffset()).append("ms (OS time: ").append(new Date(Clock.getInstance().now() - Clock.getInstance().getOffset())).append(")<br />\n");
buf.append("<b>Clock offset: </b> ").append(_context.clock().getOffset()).append("ms (OS time: ").append(new Date(_context.clock().now() - _context.clock().getOffset())).append(")<br />\n");
long tot = Runtime.getRuntime().totalMemory()/1024;
long free = Runtime.getRuntime().freeMemory()/1024;
buf.append("<b>Memory:</b> In use: ").append((tot-free)).append("KB Free: ").append(free).append("KB <br />\n");
@ -225,8 +230,8 @@ public class Router {
buf.append("<b><font color=\"red\">HIGHER VERSION SEEN</font><b> - please <a href=\"http://i2p.dnsalias.net/\">check</a> to see if there is a new release out<br />\n");
buf.append("<hr /><a name=\"bandwidth\"> </a><h2>Bandwidth</h2>\n");
long sent = BandwidthLimiter.getInstance().getTotalSendBytes();
long received = BandwidthLimiter.getInstance().getTotalReceiveBytes();
long sent = _context.bandwidthLimiter().getTotalSendBytes();
long received = _context.bandwidthLimiter().getTotalReceiveBytes();
buf.append("<ul>");
buf.append("<li> ").append(sent).append(" bytes sent, ");
@ -235,7 +240,7 @@ public class Router {
DecimalFormat fmt = new DecimalFormat("##0.00");
// we use the unadjusted time, since thats what getWhenStarted is based off
long lifetime = Clock.getInstance().now()-Clock.getInstance().getOffset() - getWhenStarted();
long lifetime = _context.clock().now()-_context.clock().getOffset() - getWhenStarted();
lifetime /= 1000;
if ( (sent > 0) && (received > 0) ) {
double sendKBps = sent / (lifetime*1024.0);
@ -246,7 +251,7 @@ public class Router {
buf.append("</li>");
}
RateStat sendRate = StatManager.getInstance().getRate("transport.sendMessageSize");
RateStat sendRate = _context.statManager().getRate("transport.sendMessageSize");
for (int i = 0; i < sendRate.getPeriods().length; i++) {
Rate rate = sendRate.getRate(sendRate.getPeriods()[i]);
double bytes = rate.getLastTotalValue() + rate.getCurrentTotalValue();
@ -280,7 +285,7 @@ public class Router {
buf.append("</li>");
}
RateStat receiveRate = StatManager.getInstance().getRate("transport.receiveMessageSize");
RateStat receiveRate = _context.statManager().getRate("transport.receiveMessageSize");
for (int i = 0; i < receiveRate.getPeriods().length; i++) {
Rate rate = receiveRate.getRate(receiveRate.getPeriods()[i]);
double bytes = rate.getLastTotalValue() + rate.getCurrentTotalValue();
@ -321,23 +326,23 @@ public class Router {
buf.append("\n");
buf.append("<hr /><a name=\"clients\"> </a>\n");
buf.append(ClientManagerFacade.getInstance().renderStatusHTML());
buf.append(_context.clientManager().renderStatusHTML());
buf.append("\n<hr /><a name=\"transports\"> </a>\n");
buf.append(CommSystemFacade.getInstance().renderStatusHTML());
buf.append(_context.commSystem().renderStatusHTML());
buf.append("\n<hr /><a name=\"profiles\"> </a>\n");
buf.append(PeerManagerFacade.getInstance().renderStatusHTML());
buf.append(_context.peerManager().renderStatusHTML());
buf.append("\n<hr /><a name=\"tunnels\"> </a>\n");
buf.append(TunnelManagerFacade.getInstance().renderStatusHTML());
buf.append(_context.tunnelManager().renderStatusHTML());
buf.append("\n<hr /><a name=\"jobs\"> </a>\n");
buf.append(JobQueue.getInstance().renderStatusHTML());
buf.append(_context.jobQueue().renderStatusHTML());
buf.append("\n<hr /><a name=\"shitlist\"> </a>\n");
buf.append(Shitlist.getInstance().renderStatusHTML());
buf.append(_context.shitlist().renderStatusHTML());
buf.append("\n<hr /><a name=\"pending\"> </a>\n");
buf.append(OutboundMessageRegistry.getInstance().renderStatusHTML());
buf.append(_context.messageRegistry().renderStatusHTML());
buf.append("\n<hr /><a name=\"netdb\"> </a>\n");
buf.append(NetworkDatabaseFacade.getInstance().renderStatusHTML());
buf.append(_context.netDb().renderStatusHTML());
buf.append("\n<hr /><a name=\"logs\"> </a>\n");
List msgs = LogConsoleBuffer.getInstance().getMostRecentMessages();
List msgs = _context.logManager().getBuffer().getMostRecentMessages();
buf.append("\n<h2>Most recent console messages:</h2><table border=\"1\">\n");
for (Iterator iter = msgs.iterator(); iter.hasNext(); ) {
String msg = (String)iter.next();
@ -350,27 +355,28 @@ public class Router {
}
public void shutdown() {
try { JobQueue.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the job queue", t); }
try { StatisticsManager.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the stats manager", t); }
try { ClientManagerFacade.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the client manager", t); }
try { TunnelManagerFacade.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the tunnel manager", t); }
try { NetworkDatabaseFacade.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the networkDb", t); }
try { CommSystemFacade.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the comm system", t); }
try { PeerManagerFacade.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the peer manager", t); }
try { SessionKeyPersistenceHelper.getInstance().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the session key manager", t); }
try { _context.jobQueue().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the job queue", t); }
try { _context.statPublisher().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the stats manager", t); }
try { _context.clientManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the client manager", t); }
try { _context.tunnelManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the tunnel manager", t); }
try { _context.netDb().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the networkDb", t); }
try { _context.commSystem().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the comm system", t); }
try { _context.peerManager().shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the peer manager", t); }
try { _sessionKeyPersistenceHelper.shutdown(); } catch (Throwable t) { _log.log(Log.CRIT, "Error shutting down the session key manager", t); }
dumpStats();
_log.log(Log.CRIT, "Shutdown complete", new Exception("Shutdown"));
try { LogManager.getInstance().shutdown(); } catch (Throwable t) { }
try { _context.logManager().shutdown(); } catch (Throwable t) { }
try { Thread.sleep(1000); } catch (InterruptedException ie) {}
Runtime.getRuntime().halt(-1);
}
private void dumpStats() {
_log.log(Log.CRIT, "Lifetime stats:\n\n" + StatsGenerator.generateStatsPage());
//_log.log(Log.CRIT, "Lifetime stats:\n\n" + StatsGenerator.generateStatsPage());
}
public static void main(String args[]) {
Router.getInstance().runRouter();
Router r = new Router();
r.runRouter();
}
private class ShutdownHook extends Thread {
@ -381,17 +387,18 @@ public class Router {
}
/** update the router.info file whenever its, er, updated */
private static class PersistRouterInfoJob extends JobImpl {
private class PersistRouterInfoJob extends JobImpl {
public PersistRouterInfoJob() { super(Router.this._context); }
public String getName() { return "Persist Updated Router Information"; }
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Persisting updated router info");
String infoFilename = Router.getInstance().getConfigSetting(PROP_INFO_FILENAME);
String infoFilename = getConfigSetting(PROP_INFO_FILENAME);
if (infoFilename == null)
infoFilename = PROP_INFO_FILENAME_DEFAULT;
RouterInfo info = Router.getInstance().getRouterInfo();
RouterInfo info = getRouterInfo();
FileOutputStream fos = null;
try {

View File

@ -0,0 +1,465 @@
package net.i2p.router;
import net.i2p.data.Hash;
import net.i2p.router.client.ClientManagerFacadeImpl;
import net.i2p.router.transport.OutboundMessageRegistry;
import net.i2p.router.networkdb.kademlia.KademliaNetworkDatabaseFacade;
import net.i2p.router.transport.CommSystemFacadeImpl;
import net.i2p.router.transport.BandwidthLimiter;
import net.i2p.router.transport.TrivialBandwidthLimiter;
import net.i2p.router.tunnelmanager.PoolingTunnelManagerFacade;
import net.i2p.router.peermanager.ProfileOrganizer;
import net.i2p.router.peermanager.PeerManagerFacadeImpl;
import net.i2p.router.peermanager.ProfileManagerImpl;
import net.i2p.router.peermanager.Calculator;
import net.i2p.router.peermanager.IsFailingCalculator;
import net.i2p.router.peermanager.ReliabilityCalculator;
import net.i2p.router.peermanager.SpeedCalculator;
import net.i2p.router.peermanager.IntegrationCalculator;
import net.i2p.I2PAppContext;
/**
 * Build off the core I2P context to provide a root from which a single router
 * instance can reach all of its resources.  Each router gets its very own
 * RouterContext, which is what lets several routers share one JVM without
 * their singletons colliding (session tags, netDbs, jobQueues, bandwidth
 * limiters, and so on all stay per-router).
 *
 */
public class RouterContext extends I2PAppContext {
    /** the router this context serves */
    private Router _router;
    /** client coordination */
    private ClientManagerFacade _clientManager;
    /** pool of messages to and from local clients */
    private ClientMessagePool _clientMessagePool;
    /** driver for all of the router's jobs */
    private JobQueue _jobQueue;
    /** messages read off the network */
    private InNetMessagePool _inNetMessagePool;
    /** messages waiting to be pushed onto the network */
    private OutNetMessagePool _outNetMessagePool;
    /** debugging tracker of message flow */
    private MessageHistory _messageHistory;
    /** reply registry for outbound messages */
    private OutboundMessageRegistry _messageRegistry;
    /** our network database cache */
    private NetworkDatabaseFacade _netDb;
    /** router + client key coordination */
    private KeyManager _keyManager;
    /** transport layer */
    private CommSystemFacade _commSystem;
    /** peer profiling and tiering */
    private ProfileOrganizer _profileOrganizer;
    /** peer selection */
    private PeerManagerFacade _peerManager;
    /** peer event notification */
    private ProfileManager _profileManager;
    /** bandwidth throttle */
    private BandwidthLimiter _bandwidthLimiter;
    /** tunnel coordination */
    private TunnelManagerFacade _tunnelManager;
    /** optional stat publisher */
    private StatisticsManager _statPublisher;
    /** peers we hate */
    private Shitlist _shitlist;
    /** duplicate / validity checks on received messages */
    private MessageValidator _messageValidator;
    /** profile ranking strategies */
    private Calculator _isFailingCalc;
    private Calculator _integrationCalc;
    private Calculator _speedCalc;
    private Calculator _reliabilityCalc;

    public RouterContext(Router router) {
        super();
        _router = router;
        initAll();
    }
    /**
     * Eagerly instantiate every component.  NOTE(review): the construction
     * order below is deliberate - components receive this context while it is
     * still being populated, so don't reorder casually.
     */
    private void initAll() {
        _clientManager = new ClientManagerFacadeImpl(this);
        _clientMessagePool = new ClientMessagePool(this);
        _jobQueue = new JobQueue(this);
        _inNetMessagePool = new InNetMessagePool(this);
        _outNetMessagePool = new OutNetMessagePool(this);
        _messageHistory = new MessageHistory(this);
        _messageRegistry = new OutboundMessageRegistry(this);
        _netDb = new KademliaNetworkDatabaseFacade(this);
        _keyManager = new KeyManager(this);
        _commSystem = new CommSystemFacadeImpl(this);
        _profileOrganizer = new ProfileOrganizer(this);
        _peerManager = new PeerManagerFacadeImpl(this);
        _profileManager = new ProfileManagerImpl(this);
        _bandwidthLimiter = new TrivialBandwidthLimiter(this);
        _tunnelManager = new PoolingTunnelManagerFacade(this);
        _statPublisher = new StatisticsManager(this);
        _shitlist = new Shitlist(this);
        _messageValidator = new MessageValidator(this);
        _isFailingCalc = new IsFailingCalculator(this);
        _integrationCalc = new IntegrationCalculator(this);
        _speedCalc = new SpeedCalculator(this);
        _reliabilityCalc = new ReliabilityCalculator(this);
    }

    /** what router is this context working for? */
    public Router router() { return _router; }
    /** convenience: the hash of our router's identity */
    public Hash routerHash() { return _router.getRouterInfo().getIdentity().getHash(); }

    /** how we coordinate clients for the router */
    public ClientManagerFacade clientManager() { return _clientManager; }
    /** where we toss messages for (and pick up messages from) local clients */
    public ClientMessagePool clientMessagePool() { return _clientMessagePool; }
    /** where the comm system dumps whatever it reads off the network */
    public InNetMessagePool inNetMessagePool() { return _inNetMessagePool; }
    /** where we put messages the router wants forwarded onto the network */
    public OutNetMessagePool outNetMessagePool() { return _outNetMessagePool; }
    /**
     * Debugging-only tracker recording what messages are wrapped in what
     * containers and how they proceed through the network - when a large
     * portion of the network tracks their messages through this history and
     * submits their logs, we can correlate them and watch messages flow from
     * hop to hop.
     */
    public MessageHistory messageHistory() { return _messageHistory; }
    /** registry that outbound messages use to wait for their replies */
    public OutboundMessageRegistry messageRegistry() { return _messageRegistry; }
    /** our network database cache */
    public NetworkDatabaseFacade netDb() { return _netDb; }
    /** the actual driver of the router - all jobs are enqueued and run here */
    public JobQueue jobQueue() { return _jobQueue; }
    /**
     * Coordinates the router's ElGamal and DSA keys, plus any keys clients
     * hand us as part of a LeaseSet.
     */
    public KeyManager keyManager() { return _keyManager; }
    /** how we pass messages from our outNetMessagePool to another router */
    public CommSystemFacade commSystem() { return _commSystem; }
    /**
     * Organizes the peers we know about into tiers, profiling their
     * performance and sorting them accordingly.
     */
    public ProfileOrganizer profileOrganizer() { return _profileOrganizer; }
    /**
     * Minimal interface for selecting peers for various tasks by given
     * criteria.  Kept apart from the profile organizer since this logic is
     * independent of how the peers are organized (or even profiled).
     */
    public PeerManagerFacade peerManager() { return _peerManager; }
    /**
     * Simple API through which router components take note of particular
     * events a peer enacts (sends us a message, agrees to participate in a
     * tunnel, etc).
     */
    public ProfileManager profileManager() { return _profileManager; }
    /** coordinates this router's bandwidth limits */
    public BandwidthLimiter bandwidthLimiter() { return _bandwidthLimiter; }
    /**
     * Coordinates this router's tunnels (its pools, participation, backup,
     * etc).  Any configuration for the tunnels is rooted from the context's
     * properties.
     */
    public TunnelManagerFacade tunnelManager() { return _tunnelManager; }
    /**
     * If the router is configured to, gathers up some particularly tasty
     * morsels regarding the stats managed and offers to publish them into
     * the routerInfo.
     */
    public StatisticsManager statPublisher() { return _statPublisher; }
    /** who does this peer hate? */
    public Shitlist shitlist() { return _shitlist; }
    /**
     * Tracks the messages we receive to prevent duplicates, along with other
     * criteria for "validity".
     */
    public MessageValidator messageValidator() { return _messageValidator; }

    /** how do we rank the failure of profiles? */
    public Calculator isFailingCalculator() { return _isFailingCalc; }
    /** how do we rank the integration of profiles? */
    public Calculator integrationCalculator() { return _integrationCalc; }
    /** how do we rank the speed of profiles? */
    public Calculator speedCalculator() { return _speedCalc; }
    /** how do we rank the reliability of profiles? */
    public Calculator reliabilityCalculator() { return _reliabilityCalc; }
}
/*
public class RouterContext extends I2PAppContext {
private Router _router;
private ClientManagerFacade _clientManagerFacade;
private ClientMessagePool _clientMessagePool;
private JobQueue _jobQueue;
private InNetMessagePool _inNetMessagePool;
private OutNetMessagePool _outNetMessagePool;
private MessageHistory _messageHistory;
private OutboundMessageRegistry _messageRegistry;
private NetworkDatabaseFacade _netDb;
private KeyManager _keyManager;
private CommSystemFacade _commSystem;
private ProfileOrganizer _profileOrganizer;
private PeerManagerFacade _peerManagerFacade;
private ProfileManager _profileManager;
private BandwidthLimiter _bandwidthLimiter;
private TunnelManagerFacade _tunnelManager;
private StatisticsManager _statPublisher;
private Shitlist _shitlist;
private MessageValidator _messageValidator;
private volatile boolean _clientManagerFacadeInitialized;
private volatile boolean _clientMessagePoolInitialized;
private volatile boolean _jobQueueInitialized;
private volatile boolean _inNetMessagePoolInitialized;
private volatile boolean _outNetMessagePoolInitialized;
private volatile boolean _messageHistoryInitialized;
private volatile boolean _messageRegistryInitialized;
private volatile boolean _netDbInitialized;
private volatile boolean _peerSelectorInitialized;
private volatile boolean _keyManagerInitialized;
private volatile boolean _commSystemInitialized;
private volatile boolean _profileOrganizerInitialized;
private volatile boolean _profileManagerInitialized;
private volatile boolean _peerManagerFacadeInitialized;
private volatile boolean _bandwidthLimiterInitialized;
private volatile boolean _tunnelManagerInitialized;
private volatile boolean _statPublisherInitialized;
private volatile boolean _shitlistInitialized;
private volatile boolean _messageValidatorInitialized;
private Calculator _isFailingCalc = new IsFailingCalculator(this);
private Calculator _integrationCalc = new IntegrationCalculator(this);
private Calculator _speedCalc = new SpeedCalculator(this);
private Calculator _reliabilityCalc = new ReliabilityCalculator(this);
public Calculator isFailingCalculator() { return _isFailingCalc; }
public Calculator integrationCalculator() { return _integrationCalc; }
public Calculator speedCalculator() { return _speedCalc; }
public Calculator reliabilityCalculator() { return _reliabilityCalc; }
public RouterContext(Router router) {
super();
_router = router;
}
public Router router() { return _router; }
public Hash routerHash() { return _router.getRouterInfo().getIdentity().getHash(); }
public ClientManagerFacade clientManager() {
if (!_clientManagerFacadeInitialized) initializeClientManagerFacade();
return _clientManagerFacade;
}
private void initializeClientManagerFacade() {
synchronized (this) {
if (_clientManagerFacade == null) {
_clientManagerFacade = new ClientManagerFacadeImpl(this);
}
_clientManagerFacadeInitialized = true;
}
}
public ClientMessagePool clientMessagePool() {
if (!_clientMessagePoolInitialized) initializeClientMessagePool();
return _clientMessagePool;
}
private void initializeClientMessagePool() {
synchronized (this) {
if (_clientMessagePool == null) {
_clientMessagePool = new ClientMessagePool(this);
}
_clientMessagePoolInitialized = true;
}
}
public InNetMessagePool inNetMessagePool() {
if (!_inNetMessagePoolInitialized) initializeInNetMessagePool();
return _inNetMessagePool;
}
private void initializeInNetMessagePool() {
synchronized (this) {
if (_inNetMessagePool == null) {
_inNetMessagePool = new InNetMessagePool(this);
}
_inNetMessagePoolInitialized = true;
}
}
public OutNetMessagePool outNetMessagePool() {
if (!_outNetMessagePoolInitialized) initializeOutNetMessagePool();
return _outNetMessagePool;
}
private void initializeOutNetMessagePool() {
synchronized (this) {
if (_outNetMessagePool == null) {
_outNetMessagePool = new OutNetMessagePool(this);
}
_outNetMessagePoolInitialized = true;
}
}
public MessageHistory messageHistory() {
if (!_messageHistoryInitialized) initializeMessageHistory();
return _messageHistory;
}
private void initializeMessageHistory() {
synchronized (this) {
if (_messageHistory == null) {
_messageHistory = new MessageHistory(this);
}
_messageHistoryInitialized = true;
}
}
public OutboundMessageRegistry messageRegistry() {
if (!_messageRegistryInitialized) initializeMessageRegistry();
return _messageRegistry;
}
private void initializeMessageRegistry() {
synchronized (this) {
if (_messageRegistry == null)
_messageRegistry = new OutboundMessageRegistry(this);
_messageRegistryInitialized = true;
}
}
public NetworkDatabaseFacade netDb() {
if (!_netDbInitialized) initializeNetDb();
return _netDb;
}
private void initializeNetDb() {
synchronized (this) {
if (_netDb == null)
_netDb = new KademliaNetworkDatabaseFacade(this);
_netDbInitialized = true;
}
}
public JobQueue jobQueue() {
if (!_jobQueueInitialized) initializeJobQueue();
return _jobQueue;
}
private void initializeJobQueue() {
synchronized (this) {
if (_jobQueue == null) {
_jobQueue= new JobQueue(this);
}
_jobQueueInitialized = true;
}
}
public KeyManager keyManager() {
if (!_keyManagerInitialized) initializeKeyManager();
return _keyManager;
}
private void initializeKeyManager() {
synchronized (this) {
if (_keyManager == null)
_keyManager = new KeyManager(this);
_keyManagerInitialized = true;
}
}
public CommSystemFacade commSystem() {
if (!_commSystemInitialized) initializeCommSystem();
return _commSystem;
}
private void initializeCommSystem() {
synchronized (this) {
if (_commSystem == null)
_commSystem = new CommSystemFacadeImpl(this);
_commSystemInitialized = true;
}
}
public ProfileOrganizer profileOrganizer() {
if (!_profileOrganizerInitialized) initializeProfileOrganizer();
return _profileOrganizer;
}
private void initializeProfileOrganizer() {
synchronized (this) {
if (_profileOrganizer == null)
_profileOrganizer = new ProfileOrganizer(this);
_profileOrganizerInitialized = true;
}
}
public PeerManagerFacade peerManager() {
if (!_peerManagerFacadeInitialized) initializePeerManager();
return _peerManagerFacade;
}
private void initializePeerManager() {
synchronized (this) {
if (_peerManagerFacade == null)
_peerManagerFacade = new PeerManagerFacadeImpl(this);
_peerManagerFacadeInitialized = true;
}
}
public BandwidthLimiter bandwidthLimiter() {
if (!_bandwidthLimiterInitialized) initializeBandwidthLimiter();
return _bandwidthLimiter;
}
private void initializeBandwidthLimiter() {
synchronized (this) {
if (_bandwidthLimiter == null)
_bandwidthLimiter = new TrivialBandwidthLimiter(this);
_bandwidthLimiterInitialized = true;
}
}
public TunnelManagerFacade tunnelManager() {
if (!_tunnelManagerInitialized) initializeTunnelManager();
return _tunnelManager;
}
private void initializeTunnelManager() {
synchronized (this) {
if (_tunnelManager == null)
_tunnelManager = new PoolingTunnelManagerFacade(this);
_tunnelManagerInitialized = true;
}
}
public ProfileManager profileManager() {
if (!_profileManagerInitialized) initializeProfileManager();
return _profileManager;
}
private void initializeProfileManager() {
synchronized (this) {
if (_profileManager == null)
_profileManager = new ProfileManagerImpl(this);
_profileManagerInitialized = true;
}
}
public StatisticsManager statPublisher() {
if (!_statPublisherInitialized) initializeStatPublisher();
return _statPublisher;
}
private void initializeStatPublisher() {
synchronized (this) {
if (_statPublisher == null)
_statPublisher = new StatisticsManager(this);
_statPublisherInitialized = true;
}
}
public Shitlist shitlist() {
if (!_shitlistInitialized) initializeShitlist();
return _shitlist;
}
private void initializeShitlist() {
synchronized (this) {
if (_shitlist == null)
_shitlist = new Shitlist(this);
_shitlistInitialized = true;
}
}
public MessageValidator messageValidator() {
if (!_messageValidatorInitialized) initializeMessageValidator();
return _messageValidator;
}
private void initializeMessageValidator() {
synchronized (this) {
if (_messageValidator == null)
_messageValidator = new MessageValidator(this);
_messageValidatorInitialized = true;
}
}
}
*/

View File

@ -15,77 +15,83 @@ import net.i2p.util.Log;
*
*/
public class SessionKeyPersistenceHelper implements Service {
private final static Log _log = new Log(SessionKeyPersistenceHelper.class);
private static SessionKeyPersistenceHelper _instance = new SessionKeyPersistenceHelper();
public static SessionKeyPersistenceHelper getInstance() { return _instance; }
private Log _log;
private RouterContext _context;
private final static long PERSIST_DELAY = 3*60*1000;
private final static String SESSION_KEY_FILE = "sessionKeys.dat";
public SessionKeyPersistenceHelper(RouterContext context) {
_context = context;
_log = _context.logManager().getLog(SessionKeyPersistenceHelper.class);
}
public void shutdown() {
writeState();
writeState();
}
public void startup() {
SessionKeyManager mgr = SessionKeyManager.getInstance();
if (mgr instanceof PersistentSessionKeyManager) {
PersistentSessionKeyManager manager = (PersistentSessionKeyManager)mgr;
File f = new File(SESSION_KEY_FILE);
if (f.exists()) {
FileInputStream fin = null;
try {
fin = new FileInputStream(f);
manager.loadState(fin);
int expired = manager.aggressiveExpire();
_log.debug("Session keys loaded [not error] with " + expired + " sets immediately expired");
} catch (Throwable t) {
_log.error("Error reading in session key data", t);
} finally {
if (fin != null) try { fin.close(); } catch (IOException ioe) {}
}
}
JobQueue.getInstance().addJob(new SessionKeyWriterJob());
}
SessionKeyManager mgr = _context.sessionKeyManager();
if (mgr instanceof PersistentSessionKeyManager) {
PersistentSessionKeyManager manager = (PersistentSessionKeyManager)mgr;
File f = new File(SESSION_KEY_FILE);
if (f.exists()) {
FileInputStream fin = null;
try {
fin = new FileInputStream(f);
manager.loadState(fin);
int expired = manager.aggressiveExpire();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Session keys loaded [not error] with " + expired
+ " sets immediately expired");
} catch (Throwable t) {
_log.error("Error reading in session key data", t);
} finally {
if (fin != null) try { fin.close(); } catch (IOException ioe) {}
}
}
_context.jobQueue().addJob(new SessionKeyWriterJob());
}
}
private static void writeState() {
Object o = SessionKeyManager.getInstance();
if (!(o instanceof PersistentSessionKeyManager)) {
_log.error("Unable to persist the session key state - manager is " + o.getClass().getName());
return;
}
PersistentSessionKeyManager mgr = (PersistentSessionKeyManager)o;
// only need for synchronization is during shutdown()
synchronized (mgr) {
FileOutputStream fos = null;
try {
int expired = mgr.aggressiveExpire();
if (expired > 0) {
_log.info("Agressive expired " + expired + " tag sets");
}
fos = new FileOutputStream(SESSION_KEY_FILE);
mgr.saveState(fos);
fos.flush();
_log.debug("Session keys written");
} catch (Throwable t) {
_log.debug("Error writing session key state", t);
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
private void writeState() {
Object o = _context.sessionKeyManager();
if (!(o instanceof PersistentSessionKeyManager)) {
_log.error("Unable to persist the session key state - manager is " + o.getClass().getName());
return;
}
PersistentSessionKeyManager mgr = (PersistentSessionKeyManager)o;
// only need for synchronization is during shutdown()
synchronized (mgr) {
FileOutputStream fos = null;
try {
int expired = mgr.aggressiveExpire();
if (expired > 0) {
_log.info("Agressive expired " + expired + " tag sets");
}
fos = new FileOutputStream(SESSION_KEY_FILE);
mgr.saveState(fos);
fos.flush();
_log.debug("Session keys written");
} catch (Throwable t) {
_log.debug("Error writing session key state", t);
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
}
public String renderStatusHTML() { return ""; }
private class SessionKeyWriterJob extends JobImpl {
public SessionKeyWriterJob() {
super();
getTiming().setStartAfter(PERSIST_DELAY);
}
public String getName() { return "Write Session Keys"; }
public void runJob() {
writeState();
requeue(PERSIST_DELAY);
}
public SessionKeyWriterJob() {
super(SessionKeyPersistenceHelper.this._context);
getTiming().setStartAfter(PERSIST_DELAY);
}
public String getName() { return "Write Session Keys"; }
public void runJob() {
writeState();
requeue(PERSIST_DELAY);
}
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -24,76 +24,82 @@ import net.i2p.util.Log;
*
*/
public class Shitlist {
private final static Shitlist _instance = new Shitlist();
public final static Shitlist getInstance() { return _instance; }
private final static Log _log = new Log(Shitlist.class);
private Log _log;
private RouterContext _context;
private Map _shitlist; // H(routerIdent) --> Date
public final static long SHITLIST_DURATION_MS = 4*60*1000; // 4 minute shitlist
private Shitlist() {
_shitlist = new HashMap(100);
public Shitlist(RouterContext context) {
_context = context;
_log = context.logManager().getLog(Shitlist.class);
_shitlist = new HashMap(100);
}
public boolean shitlistRouter(Hash peer) {
if (peer == null) return false;
boolean wasAlready = false;
if (_log.shouldLog(Log.INFO))
_log.info("Shitlisting router " + peer.toBase64(), new Exception("Shitlist cause"));
synchronized (_shitlist) {
Date oldDate = (Date)_shitlist.put(peer, new Date(Clock.getInstance().now()));
wasAlready = (null == oldDate);
}
NetworkDatabaseFacade.getInstance().fail(peer);
TunnelManagerFacade.getInstance().peerFailed(peer);
return wasAlready;
if (peer == null) return false;
if (_context.routerHash().equals(peer)) {
_log.error("wtf, why did we try to shitlist ourselves?", new Exception("shitfaced"));
return false;
}
boolean wasAlready = false;
if (_log.shouldLog(Log.INFO))
_log.info("Shitlisting router " + peer.toBase64(), new Exception("Shitlist cause"));
synchronized (_shitlist) {
Date oldDate = (Date)_shitlist.put(peer, new Date(_context.clock().now()));
wasAlready = (null == oldDate);
}
_context.netDb().fail(peer);
_context.tunnelManager().peerFailed(peer);
return wasAlready;
}
public void unshitlistRouter(Hash peer) {
if (peer == null) return;
_log.info("Unshitlisting router " + peer.toBase64());
synchronized (_shitlist) {
_shitlist.remove(peer);
}
if (peer == null) return;
_log.info("Unshitlisting router " + peer.toBase64());
synchronized (_shitlist) {
_shitlist.remove(peer);
}
}
public boolean isShitlisted(Hash peer) {
Date shitlistDate = null;
synchronized (_shitlist) {
shitlistDate = (Date)_shitlist.get(peer);
}
if (shitlistDate == null) return false;
// check validity
if (shitlistDate.getTime() > Clock.getInstance().now() - SHITLIST_DURATION_MS) {
return true;
} else {
unshitlistRouter(peer);
return false;
}
Date shitlistDate = null;
synchronized (_shitlist) {
shitlistDate = (Date)_shitlist.get(peer);
}
if (shitlistDate == null) return false;
// check validity
if (shitlistDate.getTime() > _context.clock().now() - SHITLIST_DURATION_MS) {
return true;
} else {
unshitlistRouter(peer);
return false;
}
}
public String renderStatusHTML() {
StringBuffer buf = new StringBuffer();
buf.append("<h2>Shitlist</h2>");
Map shitlist = new HashMap();
synchronized (_shitlist) {
shitlist.putAll(_shitlist);
}
buf.append("<ul>");
long limit = Clock.getInstance().now() - SHITLIST_DURATION_MS;
for (Iterator iter = shitlist.keySet().iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Date shitDate = (Date)shitlist.get(key);
if (shitDate.getTime() < limit)
unshitlistRouter(key);
else
buf.append("<li><b>").append(key.toBase64()).append("</b> was shitlisted on ").append(shitDate).append("</li>\n");
}
buf.append("</ul>\n");
return buf.toString();
StringBuffer buf = new StringBuffer();
buf.append("<h2>Shitlist</h2>");
Map shitlist = new HashMap();
synchronized (_shitlist) {
shitlist.putAll(_shitlist);
}
buf.append("<ul>");
long limit = _context.clock().now() - SHITLIST_DURATION_MS;
for (Iterator iter = shitlist.keySet().iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Date shitDate = (Date)shitlist.get(key);
if (shitDate.getTime() < limit)
unshitlistRouter(key);
else
buf.append("<li><b>").append(key.toBase64()).append("</b> was shitlisted on ").append(shitDate).append("</li>\n");
}
buf.append("</ul>\n");
return buf.toString();
}
}

View File

@ -25,9 +25,8 @@ import net.i2p.util.Log;
*
*/
public class StatisticsManager implements Service {
private final static Log _log = new Log(StatisticsManager.class);
private static StatisticsManager _instance = new StatisticsManager();
public static StatisticsManager getInstance() { return _instance; }
private Log _log;
private RouterContext _context;
private boolean _includePeerRankings;
private int _publishedStats;
@ -36,13 +35,15 @@ public class StatisticsManager implements Service {
public final static String PROP_MAX_PUBLISHED_PEERS = "router.publishPeerMax";
public final static int DEFAULT_MAX_PUBLISHED_PEERS = 20;
public StatisticsManager() {
public StatisticsManager(RouterContext context) {
_context = context;
_log = context.logManager().getLog(StatisticsManager.class);
_includePeerRankings = false;
}
public void shutdown() {}
public void startup() {
String val = Router.getInstance().getConfigSetting(PROP_PUBLISH_RANKINGS);
String val = _context.router().getConfigSetting(PROP_PUBLISH_RANKINGS);
try {
if (val == null) {
if (_log.shouldLog(Log.INFO))
@ -65,7 +66,7 @@ public class StatisticsManager implements Service {
+ "], so we're defaulting to FALSE");
_includePeerRankings = false;
}
val = Router.getInstance().getConfigSetting(PROP_MAX_PUBLISHED_PEERS);
val = _context.router().getConfigSetting(PROP_MAX_PUBLISHED_PEERS);
if (val == null) {
_publishedStats = DEFAULT_MAX_PUBLISHED_PEERS;
} else {
@ -90,7 +91,7 @@ public class StatisticsManager implements Service {
stats.setProperty("core.id", CoreVersion.ID);
if (_includePeerRankings) {
stats.putAll(ProfileManager.getInstance().summarizePeers(_publishedStats));
stats.putAll(_context.profileManager().summarizePeers(_publishedStats));
includeRate("transport.sendProcessingTime", stats, new long[] { 60*1000, 60*60*1000 });
//includeRate("tcp.queueSize", stats);
@ -110,7 +111,7 @@ public class StatisticsManager implements Service {
includeRate("netDb.successPeers", stats, new long[] { 60*60*1000 });
includeRate("transport.receiveMessageSize", stats, new long[] { 5*60*1000, 60*60*1000 });
includeRate("transport.sendMessageSize", stats, new long[] { 5*60*1000, 60*60*1000 });
stats.setProperty("stat_uptime", DataHelper.formatDuration(Router.getInstance().getUptime()));
stats.setProperty("stat_uptime", DataHelper.formatDuration(_context.router().getUptime()));
stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");
_log.debug("Publishing peer rankings");
} else {
@ -126,7 +127,7 @@ public class StatisticsManager implements Service {
includeRate(rateName, stats, null);
}
private void includeRate(String rateName, Properties stats, long selectedPeriods[]) {
RateStat rate = StatManager.getInstance().getRate(rateName);
RateStat rate = _context.statManager().getRate(rateName);
if (rate == null) return;
long periods[] = rate.getPeriods();
for (int i = 0; i < periods.length; i++) {

View File

@ -9,12 +9,13 @@ import net.i2p.util.Clock;
import net.i2p.util.HTTPSendData;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Job that, if its allowed to, will submit the data gathered by the MessageHistory
* component to some URL so that the network can be debugged more easily. By default
* it does not submit any data or touch the message history file, but if the router
* has the line "router.submitHistory=true", it will send the file that the
* has the line "router.submitHistory=true", it will send the file that the
* MessageHistory component is configured to write to once an hour, post it to
* http://i2p.net/cgi-bin/submitMessageHistory, and then delete that file
* locally. This should only be used if the MessageHistory component is configured to
@ -22,14 +23,14 @@ import net.i2p.util.Log;
*
*/
public class SubmitMessageHistoryJob extends JobImpl {
private final static Log _log = new Log(SubmitMessageHistoryJob.class);
private Log _log;
/** default submitting data every hour */
private final static long DEFAULT_REQUEUE_DELAY = 60*60*1000;
/**
private final static long DEFAULT_REQUEUE_DELAY = 60*60*1000;
/**
* router config param for whether we want to autosubmit (and delete) the
* history data managed by MessageHistory
*/
* history data managed by MessageHistory
*/
public final static String PARAM_SUBMIT_DATA = "router.submitHistory";
/** default value for whether we autosubmit the data */
public final static boolean DEFAULT_SUBMIT_DATA = true;
@ -38,14 +39,19 @@ public class SubmitMessageHistoryJob extends JobImpl {
/** default location */
public final static String DEFAULT_SUBMIT_URL = "http://i2p.net/cgi-bin/submitMessageHistory";
public SubmitMessageHistoryJob(RouterContext context) {
super(context);
_log = context.logManager().getLog(SubmitMessageHistoryJob.class);
}
public void runJob() {
if (shouldSubmit()) {
submit();
} else {
_log.debug("Not submitting data");
// if we didn't submit we can just requeue
requeue(getRequeueDelay());
}
if (shouldSubmit()) {
submit();
} else {
_log.debug("Not submitting data");
// if we didn't submit we can just requeue
requeue(getRequeueDelay());
}
}
/**
@ -53,64 +59,64 @@ public class SubmitMessageHistoryJob extends JobImpl {
* to do the actual submission, enqueueing a new submit job when its done
*/
private void submit() {
I2PThread t = new I2PThread(new Runnable() {
public void run() {
_log.debug("Submitting data");
MessageHistory.getInstance().setPauseFlushes(true);
String filename = MessageHistory.getInstance().getFilename();
send(filename);
MessageHistory.getInstance().setPauseFlushes(false);
Job job = new SubmitMessageHistoryJob();
job.getTiming().setStartAfter(Clock.getInstance().now() + getRequeueDelay());
JobQueue.getInstance().addJob(job);
}
});
t.setName("SubmitData");
t.setPriority(I2PThread.MIN_PRIORITY);
t.setDaemon(true);
t.start();
I2PThread t = new I2PThread(new Runnable() {
public void run() {
_log.debug("Submitting data");
_context.messageHistory().setPauseFlushes(true);
String filename = _context.messageHistory().getFilename();
send(filename);
_context.messageHistory().setPauseFlushes(false);
Job job = new SubmitMessageHistoryJob(_context);
job.getTiming().setStartAfter(_context.clock().now() + getRequeueDelay());
_context.jobQueue().addJob(job);
}
});
t.setName("SubmitData");
t.setPriority(I2PThread.MIN_PRIORITY);
t.setDaemon(true);
t.start();
}
private void send(String filename) {
String url = getURL();
try {
File dataFile = new File(filename);
if (!dataFile.exists() || !dataFile.canRead()) {
_log.warn("Unable to read the message data file [" + dataFile.getAbsolutePath() + "]");
return;
}
long size = dataFile.length();
int expectedSend = 512; // 512 for HTTP overhead
if (size > 0)
expectedSend += (int)size/10; // compression
FileInputStream fin = new FileInputStream(dataFile);
BandwidthLimiter.getInstance().delayOutbound(null, expectedSend);
boolean sent = HTTPSendData.postData(url, size, fin);
fin.close();
boolean deleted = dataFile.delete();
_log.debug("Submitted " + size + " bytes? " + sent + " and deleted? " + deleted);
} catch (IOException ioe) {
_log.error("Error sending the data", ioe);
}
String url = getURL();
try {
File dataFile = new File(filename);
if (!dataFile.exists() || !dataFile.canRead()) {
_log.warn("Unable to read the message data file [" + dataFile.getAbsolutePath() + "]");
return;
}
long size = dataFile.length();
int expectedSend = 512; // 512 for HTTP overhead
if (size > 0)
expectedSend += (int)size/10; // compression
FileInputStream fin = new FileInputStream(dataFile);
_context.bandwidthLimiter().delayOutbound(null, expectedSend);
boolean sent = HTTPSendData.postData(url, size, fin);
fin.close();
boolean deleted = dataFile.delete();
_log.debug("Submitted " + size + " bytes? " + sent + " and deleted? " + deleted);
} catch (IOException ioe) {
_log.error("Error sending the data", ioe);
}
}
private String getURL() {
String str = Router.getInstance().getConfigSetting(PARAM_SUBMIT_URL);
if ( (str == null) || (str.trim().length() <= 0) )
return DEFAULT_SUBMIT_URL;
else
return str.trim();
String str = _context.router().getConfigSetting(PARAM_SUBMIT_URL);
if ( (str == null) || (str.trim().length() <= 0) )
return DEFAULT_SUBMIT_URL;
else
return str.trim();
}
private boolean shouldSubmit() {
String str = Router.getInstance().getConfigSetting(PARAM_SUBMIT_DATA);
if (str == null) {
_log.debug("History submit config not specified [" + PARAM_SUBMIT_DATA + "], default = " + DEFAULT_SUBMIT_DATA);
return DEFAULT_SUBMIT_DATA;
} else {
_log.debug("History submit config specified [" + str + "]");
}
return Boolean.TRUE.toString().equals(str);
private boolean shouldSubmit() {
String str = _context.router().getConfigSetting(PARAM_SUBMIT_DATA);
if (str == null) {
_log.debug("History submit config not specified [" + PARAM_SUBMIT_DATA + "], default = " + DEFAULT_SUBMIT_DATA);
return DEFAULT_SUBMIT_DATA;
} else {
_log.debug("History submit config specified [" + str + "]");
}
return Boolean.TRUE.toString().equals(str);
}
private long getRequeueDelay() { return DEFAULT_REQUEUE_DELAY; }
public String getName() { return "Submit Message History"; }

View File

@ -1,9 +1,9 @@
package net.i2p.router;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -29,12 +29,13 @@ import net.i2p.data.i2np.TunnelConfigurationSessionKey;
import net.i2p.data.i2np.TunnelSessionKey;
import net.i2p.data.i2np.TunnelSigningPrivateKey;
import net.i2p.data.i2np.TunnelSigningPublicKey;
import net.i2p.util.Clock;
import net.i2p.I2PAppContext;
/**
* Defines the information associated with a tunnel
*/
public class TunnelInfo extends DataStructureImpl {
private I2PAppContext _context;
private TunnelId _id;
private Hash _nextHop;
private Hash _thisHop;
@ -50,21 +51,22 @@ public class TunnelInfo extends DataStructureImpl {
private boolean _ready;
private boolean _wasEverReady;
public TunnelInfo() {
setTunnelId(null);
setThisHop(null);
setNextHop(null);
setNextHopInfo(null);
_configurationKey = null;
_verificationKey = null;
_signingKey = null;
_encryptionKey = null;
setDestination(null);
setSettings(null);
_options = new Properties();
_ready = false;
_wasEverReady = false;
_created = Clock.getInstance().now();
public TunnelInfo(I2PAppContext context) {
_context = context;
setTunnelId(null);
setThisHop(null);
setNextHop(null);
setNextHopInfo(null);
_configurationKey = null;
_verificationKey = null;
_signingKey = null;
_encryptionKey = null;
setDestination(null);
setSettings(null);
_options = new Properties();
_ready = false;
_wasEverReady = false;
_created = _context.clock().now();
}
public TunnelId getTunnelId() { return _id; }
@ -81,34 +83,34 @@ public class TunnelInfo extends DataStructureImpl {
public TunnelConfigurationSessionKey getConfigurationKey() { return _configurationKey; }
public void setConfigurationKey(TunnelConfigurationSessionKey key) { _configurationKey = key; }
public void setConfigurationKey(SessionKey key) {
TunnelConfigurationSessionKey tk = new TunnelConfigurationSessionKey();
tk.setKey(key);
_configurationKey = tk;
public void setConfigurationKey(SessionKey key) {
TunnelConfigurationSessionKey tk = new TunnelConfigurationSessionKey();
tk.setKey(key);
_configurationKey = tk;
}
public TunnelSigningPublicKey getVerificationKey() { return _verificationKey; }
public void setVerificationKey(TunnelSigningPublicKey key) { _verificationKey = key; }
public void setVerificationKey(SigningPublicKey key) {
TunnelSigningPublicKey tk = new TunnelSigningPublicKey();
tk.setKey(key);
_verificationKey = tk;
public void setVerificationKey(SigningPublicKey key) {
TunnelSigningPublicKey tk = new TunnelSigningPublicKey();
tk.setKey(key);
_verificationKey = tk;
}
public TunnelSigningPrivateKey getSigningKey() { return _signingKey; }
public void setSigningKey(TunnelSigningPrivateKey key) { _signingKey = key; }
public void setSigningKey(SigningPrivateKey key) {
TunnelSigningPrivateKey tk = new TunnelSigningPrivateKey();
tk.setKey(key);
_signingKey = tk;
public void setSigningKey(SigningPrivateKey key) {
TunnelSigningPrivateKey tk = new TunnelSigningPrivateKey();
tk.setKey(key);
_signingKey = tk;
}
public TunnelSessionKey getEncryptionKey() { return _encryptionKey; }
public void setEncryptionKey(TunnelSessionKey key) { _encryptionKey = key; }
public void setEncryptionKey(SessionKey key) {
TunnelSessionKey tk = new TunnelSessionKey();
tk.setKey(key);
_encryptionKey = tk;
public void setEncryptionKey(SessionKey key) {
TunnelSessionKey tk = new TunnelSessionKey();
tk.setKey(key);
_encryptionKey = tk;
}
public Destination getDestination() { return _destination; }
@ -120,17 +122,17 @@ public class TunnelInfo extends DataStructureImpl {
public Set getPropertyNames() { return new HashSet(_options.keySet()); }
public TunnelSettings getSettings() { return _settings; }
public void setSettings(TunnelSettings settings) { _settings = settings; }
public void setSettings(TunnelSettings settings) { _settings = settings; }
/**
* Have all of the routers in this tunnel confirmed participation, and we're ok to
* start sending messages through this tunnel?
*/
public boolean getIsReady() { return _ready; }
public void setIsReady(boolean ready) {
_ready = ready;
if (ready)
_wasEverReady = true;
public void setIsReady(boolean ready) {
_ready = ready;
if (ready)
_wasEverReady = true;
}
/**
* true if this tunnel was ever working (aka rebuildable)
@ -145,204 +147,204 @@ public class TunnelInfo extends DataStructureImpl {
*
*/
public final int getLength() {
int len = 0;
TunnelInfo info = this;
while (info != null) {
info = info.getNextHopInfo();
len++;
}
return len;
int len = 0;
TunnelInfo info = this;
while (info != null) {
info = info.getNextHopInfo();
len++;
}
return len;
}
public void readBytes(InputStream in) throws DataFormatException, IOException {
_options = DataHelper.readProperties(in);
Boolean includeDest = DataHelper.readBoolean(in);
if (includeDest.booleanValue()) {
_destination = new Destination();
_destination.readBytes(in);
} else {
_destination = null;
}
Boolean includeThis = DataHelper.readBoolean(in);
if (includeThis.booleanValue()) {
_thisHop = new Hash();
_thisHop.readBytes(in);
} else {
_thisHop = null;
}
Boolean includeNext = DataHelper.readBoolean(in);
if (includeNext.booleanValue()) {
_nextHop = new Hash();
_nextHop.readBytes(in);
} else {
_nextHop = null;
}
Boolean includeNextInfo = DataHelper.readBoolean(in);
if (includeNextInfo.booleanValue()) {
_nextHopInfo = new TunnelInfo();
_nextHopInfo.readBytes(in);
} else {
_nextHopInfo = null;
}
_id = new TunnelId();
_id.readBytes(in);
Boolean includeConfigKey = DataHelper.readBoolean(in);
if (includeConfigKey.booleanValue()) {
_configurationKey = new TunnelConfigurationSessionKey();
_configurationKey.readBytes(in);
} else {
_configurationKey = null;
}
Boolean includeEncryptionKey = DataHelper.readBoolean(in);
if (includeEncryptionKey.booleanValue()) {
_encryptionKey = new TunnelSessionKey();
_encryptionKey.readBytes(in);
} else {
_encryptionKey = null;
}
Boolean includeSigningKey = DataHelper.readBoolean(in);
if (includeSigningKey.booleanValue()) {
_signingKey = new TunnelSigningPrivateKey();
_signingKey.readBytes(in);
} else {
_signingKey = null;
}
Boolean includeVerificationKey = DataHelper.readBoolean(in);
if (includeVerificationKey.booleanValue()) {
_verificationKey = new TunnelSigningPublicKey();
_verificationKey.readBytes(in);
} else {
_verificationKey = null;
}
_settings = new TunnelSettings();
_settings.readBytes(in);
Boolean ready = DataHelper.readBoolean(in);
if (ready != null)
setIsReady(ready.booleanValue());
_options = DataHelper.readProperties(in);
Boolean includeDest = DataHelper.readBoolean(in);
if (includeDest.booleanValue()) {
_destination = new Destination();
_destination.readBytes(in);
} else {
_destination = null;
}
Boolean includeThis = DataHelper.readBoolean(in);
if (includeThis.booleanValue()) {
_thisHop = new Hash();
_thisHop.readBytes(in);
} else {
_thisHop = null;
}
Boolean includeNext = DataHelper.readBoolean(in);
if (includeNext.booleanValue()) {
_nextHop = new Hash();
_nextHop.readBytes(in);
} else {
_nextHop = null;
}
Boolean includeNextInfo = DataHelper.readBoolean(in);
if (includeNextInfo.booleanValue()) {
_nextHopInfo = new TunnelInfo(_context);
_nextHopInfo.readBytes(in);
} else {
_nextHopInfo = null;
}
_id = new TunnelId();
_id.readBytes(in);
Boolean includeConfigKey = DataHelper.readBoolean(in);
if (includeConfigKey.booleanValue()) {
_configurationKey = new TunnelConfigurationSessionKey();
_configurationKey.readBytes(in);
} else {
_configurationKey = null;
}
Boolean includeEncryptionKey = DataHelper.readBoolean(in);
if (includeEncryptionKey.booleanValue()) {
_encryptionKey = new TunnelSessionKey();
_encryptionKey.readBytes(in);
} else {
_encryptionKey = null;
}
Boolean includeSigningKey = DataHelper.readBoolean(in);
if (includeSigningKey.booleanValue()) {
_signingKey = new TunnelSigningPrivateKey();
_signingKey.readBytes(in);
} else {
_signingKey = null;
}
Boolean includeVerificationKey = DataHelper.readBoolean(in);
if (includeVerificationKey.booleanValue()) {
_verificationKey = new TunnelSigningPublicKey();
_verificationKey.readBytes(in);
} else {
_verificationKey = null;
}
_settings = new TunnelSettings(_context);
_settings.readBytes(in);
Boolean ready = DataHelper.readBoolean(in);
if (ready != null)
setIsReady(ready.booleanValue());
}
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
if (_id == null) throw new DataFormatException("Invalid tunnel ID: " + _id);
if (_options == null) throw new DataFormatException("Options are null");
if (_settings == null) throw new DataFormatException("Settings are null");
// everything else is optional in the serialization
DataHelper.writeProperties(out, _options);
if (_destination != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_destination.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_thisHop != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_thisHop.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_nextHop != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_nextHop.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_nextHopInfo != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_nextHopInfo.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
_id.writeBytes(out);
if (_configurationKey != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_configurationKey.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_encryptionKey != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_encryptionKey.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_signingKey != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_signingKey.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_verificationKey != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_verificationKey.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
_settings.writeBytes(out);
DataHelper.writeBoolean(out, new Boolean(_ready));
if (_options == null) throw new DataFormatException("Options are null");
if (_settings == null) throw new DataFormatException("Settings are null");
// everything else is optional in the serialization
DataHelper.writeProperties(out, _options);
if (_destination != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_destination.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_thisHop != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_thisHop.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_nextHop != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_nextHop.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_nextHopInfo != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_nextHopInfo.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
_id.writeBytes(out);
if (_configurationKey != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_configurationKey.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_encryptionKey != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_encryptionKey.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_signingKey != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_signingKey.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
if (_verificationKey != null) {
DataHelper.writeBoolean(out, Boolean.TRUE);
_verificationKey.writeBytes(out);
} else {
DataHelper.writeBoolean(out, Boolean.FALSE);
}
_settings.writeBytes(out);
DataHelper.writeBoolean(out, new Boolean(_ready));
}
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("[Tunnel ").append(_id.getTunnelId());
TunnelInfo cur = this;
int i = 0;
while (cur != null) {
buf.append("\n*Hop ").append(i).append(": ").append(cur.getThisHop());
if (cur.getEncryptionKey() != null)
buf.append("\n Encryption key: ").append(cur.getEncryptionKey());
if (cur.getSigningKey() != null)
buf.append("\n Signing key: ").append(cur.getSigningKey());
if (cur.getVerificationKey() != null)
buf.append("\n Verification key: ").append(cur.getVerificationKey());
if (cur.getDestination() != null)
buf.append("\n Destination: ").append(cur.getDestination().calculateHash().toBase64());
if (cur.getNextHop() != null)
buf.append("\n Next: ").append(cur.getNextHop());
if (cur.getSettings() == null)
buf.append("\n Expiration: ").append("none");
else
buf.append("\n Expiration: ").append(new Date(cur.getSettings().getExpiration()));
buf.append("\n Ready: ").append(getIsReady());
cur = cur.getNextHopInfo();
i++;
}
buf.append("]");
return buf.toString();
StringBuffer buf = new StringBuffer();
buf.append("[Tunnel ").append(_id.getTunnelId());
TunnelInfo cur = this;
int i = 0;
while (cur != null) {
buf.append("\n*Hop ").append(i).append(": ").append(cur.getThisHop());
if (cur.getEncryptionKey() != null)
buf.append("\n Encryption key: ").append(cur.getEncryptionKey());
if (cur.getSigningKey() != null)
buf.append("\n Signing key: ").append(cur.getSigningKey());
if (cur.getVerificationKey() != null)
buf.append("\n Verification key: ").append(cur.getVerificationKey());
if (cur.getDestination() != null)
buf.append("\n Destination: ").append(cur.getDestination().calculateHash().toBase64());
if (cur.getNextHop() != null)
buf.append("\n Next: ").append(cur.getNextHop());
if (cur.getSettings() == null)
buf.append("\n Expiration: ").append("none");
else
buf.append("\n Expiration: ").append(new Date(cur.getSettings().getExpiration()));
buf.append("\n Ready: ").append(getIsReady());
cur = cur.getNextHopInfo();
i++;
}
buf.append("]");
return buf.toString();
}
public int hashCode() {
int rv = 0;
rv = 7*rv + DataHelper.hashCode(_options);
rv = 7*rv + DataHelper.hashCode(_destination);
rv = 7*rv + DataHelper.hashCode(_nextHop);
rv = 7*rv + DataHelper.hashCode(_thisHop);
rv = 7*rv + DataHelper.hashCode(_id);
rv = 7*rv + DataHelper.hashCode(_configurationKey);
rv = 7*rv + DataHelper.hashCode(_encryptionKey);
rv = 7*rv + DataHelper.hashCode(_signingKey);
rv = 7*rv + DataHelper.hashCode(_verificationKey);
rv = 7*rv + DataHelper.hashCode(_settings);
rv = 7*rv + (_ready ? 0 : 1);
return rv;
int rv = 0;
rv = 7*rv + DataHelper.hashCode(_options);
rv = 7*rv + DataHelper.hashCode(_destination);
rv = 7*rv + DataHelper.hashCode(_nextHop);
rv = 7*rv + DataHelper.hashCode(_thisHop);
rv = 7*rv + DataHelper.hashCode(_id);
rv = 7*rv + DataHelper.hashCode(_configurationKey);
rv = 7*rv + DataHelper.hashCode(_encryptionKey);
rv = 7*rv + DataHelper.hashCode(_signingKey);
rv = 7*rv + DataHelper.hashCode(_verificationKey);
rv = 7*rv + DataHelper.hashCode(_settings);
rv = 7*rv + (_ready ? 0 : 1);
return rv;
}
public boolean equals(Object obj) {
if ( (obj != null) && (obj instanceof TunnelInfo) ) {
TunnelInfo info = (TunnelInfo)obj;
return DataHelper.eq(getConfigurationKey(), info.getConfigurationKey()) &&
DataHelper.eq(getDestination(), info.getDestination()) &&
getIsReady() == info.getIsReady() &&
DataHelper.eq(getEncryptionKey(), info.getEncryptionKey()) &&
DataHelper.eq(getNextHop(), info.getNextHop()) &&
DataHelper.eq(getNextHopInfo(), info.getNextHopInfo()) &&
DataHelper.eq(getSettings(), info.getSettings()) &&
DataHelper.eq(getSigningKey(), info.getSigningKey()) &&
DataHelper.eq(getThisHop(), info.getThisHop()) &&
DataHelper.eq(getTunnelId(), info.getTunnelId()) &&
DataHelper.eq(getVerificationKey(), info.getVerificationKey()) &&
DataHelper.eq(_options, info._options);
} else {
return false;
}
if ( (obj != null) && (obj instanceof TunnelInfo) ) {
TunnelInfo info = (TunnelInfo)obj;
return DataHelper.eq(getConfigurationKey(), info.getConfigurationKey()) &&
DataHelper.eq(getDestination(), info.getDestination()) &&
getIsReady() == info.getIsReady() &&
DataHelper.eq(getEncryptionKey(), info.getEncryptionKey()) &&
DataHelper.eq(getNextHop(), info.getNextHop()) &&
DataHelper.eq(getNextHopInfo(), info.getNextHopInfo()) &&
DataHelper.eq(getSettings(), info.getSettings()) &&
DataHelper.eq(getSigningKey(), info.getSigningKey()) &&
DataHelper.eq(getThisHop(), info.getThisHop()) &&
DataHelper.eq(getTunnelId(), info.getTunnelId()) &&
DataHelper.eq(getVerificationKey(), info.getVerificationKey()) &&
DataHelper.eq(_options, info._options);
} else {
return false;
}
}
}

View File

@ -19,29 +19,27 @@ import net.i2p.router.tunnelmanager.PoolingTunnelManagerFacade;
* Build and maintain tunnels throughout the network.
*
*/
public abstract class TunnelManagerFacade implements Service {
private static TunnelManagerFacade _instance = new PoolingTunnelManagerFacade();
public static TunnelManagerFacade getInstance() { return _instance; }
public interface TunnelManagerFacade extends Service {
/**
* React to a request to join the specified tunnel.
*
* @return true if the router will accept participation, else false.
*/
public abstract boolean joinTunnel(TunnelInfo info);
boolean joinTunnel(TunnelInfo info);
/**
* Retrieve the information related to a particular tunnel
*
*/
public abstract TunnelInfo getTunnelInfo(TunnelId id);
TunnelInfo getTunnelInfo(TunnelId id);
/**
* Retrieve a set of tunnels from the existing ones for various purposes
*/
public abstract List selectOutboundTunnelIds(TunnelSelectionCriteria criteria);
List selectOutboundTunnelIds(TunnelSelectionCriteria criteria);
/**
* Retrieve a set of tunnels from the existing ones for various purposes
*/
public abstract List selectInboundTunnelIds(TunnelSelectionCriteria criteria);
List selectInboundTunnelIds(TunnelSelectionCriteria criteria);
/**
* Make sure appropriate outbound tunnels are in place, builds requested
@ -49,18 +47,18 @@ public abstract class TunnelManagerFacade implements Service {
* validate the leaseSet, then publish it in the network database.
*
*/
public abstract void createTunnels(Destination destination, ClientTunnelSettings clientSettings, long timeoutMs);
void createTunnels(Destination destination, ClientTunnelSettings clientSettings, long timeoutMs);
/**
* Called when a peer becomes unreachable - go through all of the current
* tunnels and rebuild them if we can, or drop them if we can't.
*
*/
public abstract void peerFailed(Hash peer);
void peerFailed(Hash peer);
/**
* True if the peer currently part of a tunnel
*
*/
public abstract boolean isInUse(Hash peer);
boolean isInUse(Hash peer);
}

View File

@ -1,9 +1,9 @@
package net.i2p.router;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -16,13 +16,14 @@ import java.util.Date;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.DataStructureImpl;
import net.i2p.util.Clock;
import net.i2p.I2PAppContext;
/**
* Wrap up the settings specified for a particular tunnel
* Wrap up the settings specified for a particular tunnel
*
*/
public class TunnelSettings extends DataStructureImpl {
private I2PAppContext _context;
private int _depth;
private long _msgsPerMinuteAvg;
private long _bytesPerMinuteAvg;
@ -33,18 +34,19 @@ public class TunnelSettings extends DataStructureImpl {
private long _expiration;
private long _created;
public TunnelSettings() {
_depth = 0;
_msgsPerMinuteAvg = 0;
_msgsPerMinutePeak = 0;
_bytesPerMinuteAvg = 0;
_bytesPerMinutePeak = 0;
_includeDummy = false;
_reorder = false;
_expiration = 0;
_created = Clock.getInstance().now();
public TunnelSettings(I2PAppContext context) {
_context = context;
_depth = 0;
_msgsPerMinuteAvg = 0;
_msgsPerMinutePeak = 0;
_bytesPerMinuteAvg = 0;
_bytesPerMinutePeak = 0;
_includeDummy = false;
_reorder = false;
_expiration = 0;
_created = _context.clock().now();
}
public int getDepth() { return _depth; }
public void setDepth(int depth) { _depth = depth; }
public long getMessagesPerMinuteAverage() { return _msgsPerMinuteAvg; }
@ -64,71 +66,71 @@ public class TunnelSettings extends DataStructureImpl {
public long getCreated() { return _created; }
public void readBytes(InputStream in) throws DataFormatException, IOException {
Boolean b = DataHelper.readBoolean(in);
if (b == null) throw new DataFormatException("Null includeDummy boolean value");
_includeDummy = b.booleanValue();
b = DataHelper.readBoolean(in);
if (b == null) throw new DataFormatException("Null reorder boolean value");
_reorder = b.booleanValue();
_depth = (int)DataHelper.readLong(in, 1);
_bytesPerMinuteAvg = DataHelper.readLong(in, 4);
_bytesPerMinutePeak = DataHelper.readLong(in, 4);
Date exp = DataHelper.readDate(in);
if (exp == null)
_expiration = 0;
else
_expiration = exp.getTime();
_msgsPerMinuteAvg = DataHelper.readLong(in, 4);
_msgsPerMinutePeak = DataHelper.readLong(in, 4);
Date created = DataHelper.readDate(in);
if (created != null)
_created = created.getTime();
else
_created = Clock.getInstance().now();
Boolean b = DataHelper.readBoolean(in);
if (b == null) throw new DataFormatException("Null includeDummy boolean value");
_includeDummy = b.booleanValue();
b = DataHelper.readBoolean(in);
if (b == null) throw new DataFormatException("Null reorder boolean value");
_reorder = b.booleanValue();
_depth = (int)DataHelper.readLong(in, 1);
_bytesPerMinuteAvg = DataHelper.readLong(in, 4);
_bytesPerMinutePeak = DataHelper.readLong(in, 4);
Date exp = DataHelper.readDate(in);
if (exp == null)
_expiration = 0;
else
_expiration = exp.getTime();
_msgsPerMinuteAvg = DataHelper.readLong(in, 4);
_msgsPerMinutePeak = DataHelper.readLong(in, 4);
Date created = DataHelper.readDate(in);
if (created != null)
_created = created.getTime();
else
_created = _context.clock().now();
}
public void writeBytes(OutputStream out) throws DataFormatException, IOException {
DataHelper.writeBoolean(out, _includeDummy ? Boolean.TRUE : Boolean.FALSE);
DataHelper.writeBoolean(out, _reorder ? Boolean.TRUE : Boolean.FALSE);
DataHelper.writeLong(out, 1, _depth);
DataHelper.writeLong(out, 4, _bytesPerMinuteAvg);
DataHelper.writeLong(out, 4, _bytesPerMinutePeak);
if (_expiration <= 0)
DataHelper.writeDate(out, new Date(0));
else
DataHelper.writeDate(out, new Date(_expiration));
DataHelper.writeLong(out, 4, _msgsPerMinuteAvg);
DataHelper.writeLong(out, 4, _msgsPerMinutePeak);
DataHelper.writeDate(out, new Date(_created));
DataHelper.writeBoolean(out, _includeDummy ? Boolean.TRUE : Boolean.FALSE);
DataHelper.writeBoolean(out, _reorder ? Boolean.TRUE : Boolean.FALSE);
DataHelper.writeLong(out, 1, _depth);
DataHelper.writeLong(out, 4, _bytesPerMinuteAvg);
DataHelper.writeLong(out, 4, _bytesPerMinutePeak);
if (_expiration <= 0)
DataHelper.writeDate(out, new Date(0));
else
DataHelper.writeDate(out, new Date(_expiration));
DataHelper.writeLong(out, 4, _msgsPerMinuteAvg);
DataHelper.writeLong(out, 4, _msgsPerMinutePeak);
DataHelper.writeDate(out, new Date(_created));
}
public int hashCode() {
int rv = 0;
rv += _includeDummy ? 100 : 0;
rv += _reorder ? 50 : 0;
rv += _depth;
rv += _bytesPerMinuteAvg;
rv += _bytesPerMinutePeak;
rv += _expiration;
rv += _msgsPerMinuteAvg;
rv += _msgsPerMinutePeak;
return rv;
int rv = 0;
rv += _includeDummy ? 100 : 0;
rv += _reorder ? 50 : 0;
rv += _depth;
rv += _bytesPerMinuteAvg;
rv += _bytesPerMinutePeak;
rv += _expiration;
rv += _msgsPerMinuteAvg;
rv += _msgsPerMinutePeak;
return rv;
}
public boolean equals(Object obj) {
if ( (obj != null) && (obj instanceof TunnelSettings) ) {
TunnelSettings settings = (TunnelSettings)obj;
return settings.getBytesPerMinuteAverage() == getBytesPerMinuteAverage() &&
settings.getBytesPerMinutePeak() == getBytesPerMinutePeak() &&
settings.getDepth() == getDepth() &&
settings.getExpiration() == getExpiration() &&
settings.getIncludeDummy() == getIncludeDummy() &&
settings.getMessagesPerMinuteAverage() == getMessagesPerMinuteAverage() &&
settings.getMessagesPerMinutePeak() == getMessagesPerMinutePeak() &&
settings.getReorder() == getReorder();
} else {
return false;
}
if ( (obj != null) && (obj instanceof TunnelSettings) ) {
TunnelSettings settings = (TunnelSettings)obj;
return settings.getBytesPerMinuteAverage() == getBytesPerMinuteAverage() &&
settings.getBytesPerMinutePeak() == getBytesPerMinutePeak() &&
settings.getDepth() == getDepth() &&
settings.getExpiration() == getExpiration() &&
settings.getIncludeDummy() == getIncludeDummy() &&
settings.getMessagesPerMinuteAverage() == getMessagesPerMinuteAverage() &&
settings.getMessagesPerMinutePeak() == getMessagesPerMinutePeak() &&
settings.getReorder() == getReorder();
} else {
return false;
}
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.admin;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -14,6 +14,7 @@ import java.net.Socket;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Listen for connections on the specified port, and toss them onto the client manager's
@ -22,15 +23,18 @@ import net.i2p.util.Log;
* @author jrandom
*/
public class AdminListener implements Runnable {
private final static Log _log = new Log(AdminListener.class);
private Log _log;
private RouterContext _context;
private ServerSocket _socket;
private int _port;
private boolean _running;
private long _nextFailDelay = 1000;
public AdminListener(int port) {
_port = port;
_running = false;
public AdminListener(RouterContext context, int port) {
_context = context;
_log = context.logManager().getLog(AdminListener.class);
_port = port;
_running = false;
}
public void setPort(int port) { _port = port; }
@ -39,50 +43,50 @@ public class AdminListener implements Runnable {
/** max time to bind */
private final static int MAX_FAIL_DELAY = 5*60*1000;
/**
/**
* Start up the socket listener, listens for connections, and
* fires those connections off via {@link #runConnection runConnection}.
* fires those connections off via {@link #runConnection runConnection}.
* This only returns if the socket cannot be opened or there is a catastrophic
* failure.
*
*/
public void startup() {
_running = true;
int curDelay = 0;
while ( (_running) && (curDelay < MAX_FAIL_DELAY) ) {
try {
_log.info("Starting up listening for connections on port " + _port);
_socket = new ServerSocket(_port);
curDelay = 0;
while (_running) {
try {
Socket socket = _socket.accept();
_log.debug("Connection received");
runConnection(socket);
} catch (IOException ioe) {
_log.error("Server error accepting", ioe);
} catch (Throwable t) {
_log.error("Fatal error running client listener - killing the thread!", t);
return;
}
}
} catch (IOException ioe) {
_log.error("Error listening on port " + _port, ioe);
}
if (_socket != null) {
try { _socket.close(); } catch (IOException ioe) {}
_socket = null;
}
_log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again");
try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {}
curDelay += _nextFailDelay;
_nextFailDelay *= 5;
}
_log.error("CANCELING ADMIN LISTENER. delay = " + curDelay, new Exception("ADMIN LISTENER cancelled!!!"));
_running = false;
_running = true;
int curDelay = 0;
while ( (_running) && (curDelay < MAX_FAIL_DELAY) ) {
try {
_log.info("Starting up listening for connections on port " + _port);
_socket = new ServerSocket(_port);
curDelay = 0;
while (_running) {
try {
Socket socket = _socket.accept();
_log.debug("Connection received");
runConnection(socket);
} catch (IOException ioe) {
_log.error("Server error accepting", ioe);
} catch (Throwable t) {
_log.error("Fatal error running client listener - killing the thread!", t);
return;
}
}
} catch (IOException ioe) {
_log.error("Error listening on port " + _port, ioe);
}
if (_socket != null) {
try { _socket.close(); } catch (IOException ioe) {}
_socket = null;
}
_log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again");
try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {}
curDelay += _nextFailDelay;
_nextFailDelay *= 5;
}
_log.error("CANCELING ADMIN LISTENER. delay = " + curDelay, new Exception("ADMIN LISTENER cancelled!!!"));
_running = false;
}
/**
@ -90,20 +94,20 @@ public class AdminListener implements Runnable {
*
*/
protected void runConnection(Socket socket) throws IOException {
AdminRunner runner = new AdminRunner(socket);
I2PThread t = new I2PThread(runner);
t.setName("Admin Runner");
t.setPriority(Thread.MIN_PRIORITY);
t.setDaemon(true);
t.start();
AdminRunner runner = new AdminRunner(_context, socket);
I2PThread t = new I2PThread(runner);
t.setName("Admin Runner");
t.setPriority(Thread.MIN_PRIORITY);
t.setDaemon(true);
t.start();
}
public void shutdown() {
_running = false;
if (_socket != null) try {
_socket.close();
_socket = null;
} catch (IOException ioe) {}
public void shutdown() {
_running = false;
if (_socket != null) try {
_socket.close();
_socket = null;
} catch (IOException ioe) {}
}
public void run() { startup(); }
}

View File

@ -4,47 +4,52 @@ import net.i2p.router.Router;
import net.i2p.router.Service;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
public class AdminManager implements Service {
private final static Log _log = new Log(AdminManager.class);
private final static AdminManager _instance = new AdminManager();
public final static AdminManager getInstance() { return _instance; }
private Log _log;
private RouterContext _context;
public final static String PARAM_ADMIN_PORT = "router.adminPort";
public final static int DEFAULT_ADMIN_PORT = 7655;
private AdminListener _listener;
public AdminManager(RouterContext context) {
_context = context;
_log = context.logManager().getLog(AdminManager.class);
}
public String renderStatusHTML() { return ""; }
public void shutdown() {
if (_listener != null) {
_log.info("Shutting down admin listener");
_listener.shutdown();
_listener = null;
}
if (_listener != null) {
_log.info("Shutting down admin listener");
_listener.shutdown();
_listener = null;
}
}
public void startup() {
int port = DEFAULT_ADMIN_PORT;
String str = Router.getInstance().getConfigSetting(PARAM_ADMIN_PORT);
if (str != null) {
try {
int val = Integer.parseInt(str);
port = val;
} catch (NumberFormatException nfe) {
_log.warn("Invalid admin port specified [" + str + "]", nfe);
}
}
_log.info("Starting up admin listener on port " + port);
startup(port);
int port = DEFAULT_ADMIN_PORT;
String str = _context.router().getConfigSetting(PARAM_ADMIN_PORT);
if (str != null) {
try {
int val = Integer.parseInt(str);
port = val;
} catch (NumberFormatException nfe) {
_log.warn("Invalid admin port specified [" + str + "]", nfe);
}
}
_log.info("Starting up admin listener on port " + port);
startup(port);
}
private void startup(int port) {
_listener = new AdminListener(port);
I2PThread t = new I2PThread(_listener);
t.setName("Admin Listener");
t.setDaemon(true);
t.setPriority(Thread.MIN_PRIORITY);
t.start();
_listener = new AdminListener(_context, port);
I2PThread t = new I2PThread(_listener);
t.setName("Admin Listener");
t.setDaemon(true);
t.setPriority(Thread.MIN_PRIORITY);
t.start();
}
}

View File

@ -13,90 +13,96 @@ import net.i2p.data.Hash;
import net.i2p.router.Router;
import net.i2p.router.peermanager.ProfileOrganizer;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
class AdminRunner implements Runnable {
private final static Log _log = new Log(AdminRunner.class);
private Log _log;
private RouterContext _context;
private Socket _socket;
private StatsGenerator _generator;
public AdminRunner(Socket socket) {
_socket = socket;
public AdminRunner(RouterContext context, Socket socket) {
_context = context;
_log = context.logManager().getLog(AdminRunner.class);
_socket = socket;
_generator = new StatsGenerator(context);
}
public void run() {
try {
BufferedReader in = new BufferedReader(new InputStreamReader(_socket.getInputStream()));
OutputStream out = _socket.getOutputStream();
String command = in.readLine();
runCommand(command, out);
} catch (IOException ioe) {
_log.error("Error running admin command", ioe);
}
try {
BufferedReader in = new BufferedReader(new InputStreamReader(_socket.getInputStream()));
OutputStream out = _socket.getOutputStream();
String command = in.readLine();
runCommand(command, out);
} catch (IOException ioe) {
_log.error("Error running admin command", ioe);
}
}
private void runCommand(String command, OutputStream out) throws IOException {
_log.debug("Command [" + command + "]");
if (command.indexOf("favicon") >= 0) {
reply(out, "this is not a website");
} else if (command.indexOf("routerStats.html") >= 0) {
reply(out, StatsGenerator.generateStatsPage());
} else if (command.indexOf("/profile/") >= 0) {
replyText(out, getProfile(command));
} else if (true || command.indexOf("routerConsole.html") > 0) {
reply(out, Router.getInstance().renderStatusHTML());
}
_log.debug("Command [" + command + "]");
if (command.indexOf("favicon") >= 0) {
reply(out, "this is not a website");
} else if (command.indexOf("routerStats.html") >= 0) {
reply(out, _generator.generateStatsPage());
} else if (command.indexOf("/profile/") >= 0) {
replyText(out, getProfile(command));
} else if (true || command.indexOf("routerConsole.html") > 0) {
reply(out, _context.router().renderStatusHTML());
}
}
private void reply(OutputStream out, String content) throws IOException {
StringBuffer reply = new StringBuffer(10240);
reply.append("HTTP/1.1 200 OK\n");
reply.append("Connection: close\n");
reply.append("Cache-control: no-cache\n");
reply.append("Content-type: text/html\n\n");
reply.append(content);
try {
out.write(reply.toString().getBytes());
out.close();
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error writing out the admin reply:\n" + content);
throw ioe;
}
StringBuffer reply = new StringBuffer(10240);
reply.append("HTTP/1.1 200 OK\n");
reply.append("Connection: close\n");
reply.append("Cache-control: no-cache\n");
reply.append("Content-type: text/html\n\n");
reply.append(content);
try {
out.write(reply.toString().getBytes());
out.close();
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error writing out the admin reply:\n" + content);
throw ioe;
}
}
private void replyText(OutputStream out, String content) throws IOException {
StringBuffer reply = new StringBuffer(10240);
reply.append("HTTP/1.1 200 OK\n");
reply.append("Connection: close\n");
reply.append("Cache-control: no-cache\n");
reply.append("Content-type: text/plain\n\n");
reply.append(content);
try {
out.write(reply.toString().getBytes());
out.close();
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error writing out the admin reply:\n" + content);
throw ioe;
}
StringBuffer reply = new StringBuffer(10240);
reply.append("HTTP/1.1 200 OK\n");
reply.append("Connection: close\n");
reply.append("Cache-control: no-cache\n");
reply.append("Content-type: text/plain\n\n");
reply.append(content);
try {
out.write(reply.toString().getBytes());
out.close();
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error writing out the admin reply:\n" + content);
throw ioe;
}
}
private String getProfile(String cmd) {
Set peers = ProfileOrganizer._getInstance().selectAllPeers();
for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
if (cmd.indexOf(peer.toBase64().substring(0,10)) >= 0) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(64*1024);
ProfileOrganizer._getInstance().exportProfile(peer, baos);
return new String(baos.toByteArray());
} catch (IOException ioe) {
_log.error("Error exporting the profile", ioe);
return "Error exporting the peer profile\n";
}
}
}
return "No such peer is being profiled\n";
Set peers = _context.profileOrganizer().selectAllPeers();
for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
if (cmd.indexOf(peer.toBase64().substring(0,10)) >= 0) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(64*1024);
_context.profileOrganizer().exportProfile(peer, baos);
return new String(baos.toByteArray());
} catch (IOException ioe) {
_log.error("Error exporting the profile", ioe);
return "Error exporting the peer profile\n";
}
}
}
return "No such peer is being profiled\n";
}
}

View File

@ -18,190 +18,196 @@ import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.stat.StatManager;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Dump the stats to the web admin interface
*/
public class StatsGenerator {
private final static Log _log = new Log(StatsGenerator.class);
public static String generateStatsPage() {
ByteArrayOutputStream baos = new ByteArrayOutputStream(32*1024);
try {
generateStatsPage(baos);
} catch (IOException ioe) {
_log.error("Error generating stats", ioe);
}
return new String(baos.toByteArray());
private Log _log;
private RouterContext _context;
public StatsGenerator(RouterContext context) {
_context = context;
_log = context.logManager().getLog(StatsGenerator.class);
}
public static void generateStatsPage(OutputStream out) throws IOException {
PrintWriter pw = new PrintWriter(out);
pw.println("<html><head><title>I2P Router Stats</title></head><body>");
pw.println("<h1>Router statistics</h1>");
pw.println("<i><a href=\"/routerConsole.html\">console</a> | <a href=\"/routerStats.html\">stats</a></i><hr />");
Map groups = StatManager.getInstance().getStatsByGroup();
pw.println("<form action=\"/routerStats.html\">");
pw.println("<select name=\"go\" onChange='location.href=this.value'>");
for (Iterator iter = groups.keySet().iterator(); iter.hasNext(); ) {
String group = (String)iter.next();
Set stats = (Set)groups.get(group);
pw.print("<option value=\"/routerStats.html#");
pw.print(group);
pw.print("\">");
pw.print(group);
pw.println("</option>\n");
for (Iterator statIter = stats.iterator(); statIter.hasNext(); ) {
String stat = (String)statIter.next();
pw.print("<option value=\"/routerStats.html#");
pw.print(stat);
pw.print("\">...");
pw.print(stat);
pw.println("</option>\n");
}
}
pw.println("</select>");
pw.println("</form>");
pw.print("Statistics gathered during this router's uptime (");
long uptime = Router.getInstance().getUptime();
pw.print(DataHelper.formatDuration(uptime));
pw.println("). The data gathered is quantized over a 1 minute period, so should just be used as an estimate<p />");
for (Iterator iter = groups.keySet().iterator(); iter.hasNext(); ) {
String group = (String)iter.next();
Set stats = (Set)groups.get(group);
pw.print("<h2><a name=\"");
pw.print(group);
pw.print("\">");
pw.print(group);
pw.println("</a></h2>");
pw.println("<ul>");
for (Iterator statIter = stats.iterator(); statIter.hasNext(); ) {
String stat = (String)statIter.next();
pw.print("<li><b><a name=\"");
pw.print(stat);
pw.print("\">");
pw.print(stat);
pw.println("</a></b><br />");
if (StatManager.getInstance().isFrequency(stat))
renderFrequency(stat, pw);
else
renderRate(stat, pw);
}
pw.println("</ul><hr />");
}
pw.println("</body></html>");
pw.flush();
public String generateStatsPage() {
ByteArrayOutputStream baos = new ByteArrayOutputStream(32*1024);
try {
generateStatsPage(baos);
} catch (IOException ioe) {
_log.error("Error generating stats", ioe);
}
return new String(baos.toByteArray());
}
private static void renderFrequency(String name, PrintWriter pw) throws IOException {
FrequencyStat freq = StatManager.getInstance().getFrequency(name);
pw.print("<i>");
pw.print(freq.getDescription());
pw.println("</i><br />");
long periods[] = freq.getPeriods();
Arrays.sort(periods);
for (int i = 0; i < periods.length; i++) {
renderPeriod(pw, periods[i], "frequency");
Frequency curFreq = freq.getFrequency(periods[i]);
pw.print(" <i>avg per period:</i> (");
pw.print(num(curFreq.getAverageEventsPerPeriod()));
pw.print(", max ");
pw.print(num(curFreq.getMaxAverageEventsPerPeriod()));
if ( (curFreq.getMaxAverageEventsPerPeriod() > 0) && (curFreq.getAverageEventsPerPeriod() > 0) ) {
pw.print(", current is ");
pw.print(pct(curFreq.getAverageEventsPerPeriod()/curFreq.getMaxAverageEventsPerPeriod()));
pw.print(" of max");
}
pw.print(")");
//buf.append(" <i>avg interval between updates:</i> (").append(num(curFreq.getAverageInterval())).append("ms, min ");
//buf.append(num(curFreq.getMinAverageInterval())).append("ms)");
pw.print(" <i>strict average per period:</i> ");
pw.print(num(curFreq.getStrictAverageEventsPerPeriod()));
pw.print(" events (averaged ");
pw.print(" using the lifetime of ");
pw.print(num(curFreq.getEventCount()));
pw.print(" events)");
pw.println("<br />");
}
pw.println("<br />");
public void generateStatsPage(OutputStream out) throws IOException {
PrintWriter pw = new PrintWriter(out);
pw.println("<html><head><title>I2P Router Stats</title></head><body>");
pw.println("<h1>Router statistics</h1>");
pw.println("<i><a href=\"/routerConsole.html\">console</a> | <a href=\"/routerStats.html\">stats</a></i><hr />");
Map groups = _context.statManager().getStatsByGroup();
pw.println("<form action=\"/routerStats.html\">");
pw.println("<select name=\"go\" onChange='location.href=this.value'>");
for (Iterator iter = groups.keySet().iterator(); iter.hasNext(); ) {
String group = (String)iter.next();
Set stats = (Set)groups.get(group);
pw.print("<option value=\"/routerStats.html#");
pw.print(group);
pw.print("\">");
pw.print(group);
pw.println("</option>\n");
for (Iterator statIter = stats.iterator(); statIter.hasNext(); ) {
String stat = (String)statIter.next();
pw.print("<option value=\"/routerStats.html#");
pw.print(stat);
pw.print("\">...");
pw.print(stat);
pw.println("</option>\n");
}
}
pw.println("</select>");
pw.println("</form>");
pw.print("Statistics gathered during this router's uptime (");
long uptime = _context.router().getUptime();
pw.print(DataHelper.formatDuration(uptime));
pw.println("). The data gathered is quantized over a 1 minute period, so should just be used as an estimate<p />");
for (Iterator iter = groups.keySet().iterator(); iter.hasNext(); ) {
String group = (String)iter.next();
Set stats = (Set)groups.get(group);
pw.print("<h2><a name=\"");
pw.print(group);
pw.print("\">");
pw.print(group);
pw.println("</a></h2>");
pw.println("<ul>");
for (Iterator statIter = stats.iterator(); statIter.hasNext(); ) {
String stat = (String)statIter.next();
pw.print("<li><b><a name=\"");
pw.print(stat);
pw.print("\">");
pw.print(stat);
pw.println("</a></b><br />");
if (_context.statManager().isFrequency(stat))
renderFrequency(stat, pw);
else
renderRate(stat, pw);
}
pw.println("</ul><hr />");
}
pw.println("</body></html>");
pw.flush();
}
private static void renderRate(String name, PrintWriter pw) throws IOException {
RateStat rate = StatManager.getInstance().getRate(name);
pw.print("<i>");
pw.print(rate.getDescription());
pw.println("</i><br />");
long periods[] = rate.getPeriods();
Arrays.sort(periods);
pw.println("<ul>");
for (int i = 0; i < periods.length; i++) {
pw.println("<li>");
renderPeriod(pw, periods[i], "rate");
Rate curRate = rate.getRate(periods[i]);
pw.print( "<i>avg value:</i> (");
pw.print(num(curRate.getAverageValue()));
pw.print(" peak ");
pw.print(num(curRate.getExtremeAverageValue()));
pw.print(", [");
pw.print(pct(curRate.getPercentageOfExtremeValue()));
pw.print(" of max");
pw.print(", and ");
pw.print(pct(curRate.getPercentageOfLifetimeValue()));
pw.print(" of lifetime average]");
pw.print(")");
pw.print(" <i>highest total period value:</i> (");
pw.print(num(curRate.getExtremeTotalValue()));
pw.print(")");
if (curRate.getLifetimeTotalEventTime() > 0) {
pw.print(" <i>saturation:</i> (");
pw.print(pct(curRate.getLastEventSaturation()));
pw.print(")");
pw.print(" <i>saturated limit:</i> (");
pw.print(num(curRate.getLastSaturationLimit()));
pw.print(")");
pw.print(" <i>peak saturation:</i> (");
pw.print(pct(curRate.getExtremeEventSaturation()));
pw.print(")");
pw.print(" <i>peak saturated limit:</i> (");
pw.print(num(curRate.getExtremeSaturationLimit()));
pw.print(")");
}
pw.print(" <i>events per period:</i> ");
pw.print(num(curRate.getLastEventCount()));
long numPeriods = curRate.getLifetimePeriods();
if (numPeriods > 0) {
double avgFrequency = curRate.getLifetimeEventCount() / (double)numPeriods;
double peakFrequency = curRate.getExtremeEventCount();
pw.print(" (lifetime average: ");
pw.print(num(avgFrequency));
pw.print(", peak average: ");
pw.print(num(curRate.getExtremeEventCount()));
pw.println(")");
}
pw.print("</li>");
if (i + 1 == periods.length) {
// last one, so lets display the strict average
pw.print("<li><b>lifetime average value:</b> ");
pw.print(num(curRate.getLifetimeAverageValue()));
pw.print(" over ");
pw.print(num(curRate.getLifetimeEventCount()));
pw.println(" events<br /></li>");
}
}
pw.print("</ul>");
pw.println("<br />");
private void renderFrequency(String name, PrintWriter pw) throws IOException {
FrequencyStat freq = _context.statManager().getFrequency(name);
pw.print("<i>");
pw.print(freq.getDescription());
pw.println("</i><br />");
long periods[] = freq.getPeriods();
Arrays.sort(periods);
for (int i = 0; i < periods.length; i++) {
renderPeriod(pw, periods[i], "frequency");
Frequency curFreq = freq.getFrequency(periods[i]);
pw.print(" <i>avg per period:</i> (");
pw.print(num(curFreq.getAverageEventsPerPeriod()));
pw.print(", max ");
pw.print(num(curFreq.getMaxAverageEventsPerPeriod()));
if ( (curFreq.getMaxAverageEventsPerPeriod() > 0) && (curFreq.getAverageEventsPerPeriod() > 0) ) {
pw.print(", current is ");
pw.print(pct(curFreq.getAverageEventsPerPeriod()/curFreq.getMaxAverageEventsPerPeriod()));
pw.print(" of max");
}
pw.print(")");
//buf.append(" <i>avg interval between updates:</i> (").append(num(curFreq.getAverageInterval())).append("ms, min ");
//buf.append(num(curFreq.getMinAverageInterval())).append("ms)");
pw.print(" <i>strict average per period:</i> ");
pw.print(num(curFreq.getStrictAverageEventsPerPeriod()));
pw.print(" events (averaged ");
pw.print(" using the lifetime of ");
pw.print(num(curFreq.getEventCount()));
pw.print(" events)");
pw.println("<br />");
}
pw.println("<br />");
}
private void renderRate(String name, PrintWriter pw) throws IOException {
RateStat rate = _context.statManager().getRate(name);
pw.print("<i>");
pw.print(rate.getDescription());
pw.println("</i><br />");
long periods[] = rate.getPeriods();
Arrays.sort(periods);
pw.println("<ul>");
for (int i = 0; i < periods.length; i++) {
pw.println("<li>");
renderPeriod(pw, periods[i], "rate");
Rate curRate = rate.getRate(periods[i]);
pw.print( "<i>avg value:</i> (");
pw.print(num(curRate.getAverageValue()));
pw.print(" peak ");
pw.print(num(curRate.getExtremeAverageValue()));
pw.print(", [");
pw.print(pct(curRate.getPercentageOfExtremeValue()));
pw.print(" of max");
pw.print(", and ");
pw.print(pct(curRate.getPercentageOfLifetimeValue()));
pw.print(" of lifetime average]");
pw.print(")");
pw.print(" <i>highest total period value:</i> (");
pw.print(num(curRate.getExtremeTotalValue()));
pw.print(")");
if (curRate.getLifetimeTotalEventTime() > 0) {
pw.print(" <i>saturation:</i> (");
pw.print(pct(curRate.getLastEventSaturation()));
pw.print(")");
pw.print(" <i>saturated limit:</i> (");
pw.print(num(curRate.getLastSaturationLimit()));
pw.print(")");
pw.print(" <i>peak saturation:</i> (");
pw.print(pct(curRate.getExtremeEventSaturation()));
pw.print(")");
pw.print(" <i>peak saturated limit:</i> (");
pw.print(num(curRate.getExtremeSaturationLimit()));
pw.print(")");
}
pw.print(" <i>events per period:</i> ");
pw.print(num(curRate.getLastEventCount()));
long numPeriods = curRate.getLifetimePeriods();
if (numPeriods > 0) {
double avgFrequency = curRate.getLifetimeEventCount() / (double)numPeriods;
double peakFrequency = curRate.getExtremeEventCount();
pw.print(" (lifetime average: ");
pw.print(num(avgFrequency));
pw.print(", peak average: ");
pw.print(num(curRate.getExtremeEventCount()));
pw.println(")");
}
pw.print("</li>");
if (i + 1 == periods.length) {
// last one, so lets display the strict average
pw.print("<li><b>lifetime average value:</b> ");
pw.print(num(curRate.getLifetimeAverageValue()));
pw.print(" over ");
pw.print(num(curRate.getLifetimeEventCount()));
pw.println(" events<br /></li>");
}
}
pw.print("</ul>");
pw.println("<br />");
}
private static void renderPeriod(PrintWriter pw, long period, String name) throws IOException {
pw.print("<b>");
pw.print(DataHelper.formatDuration(period));
pw.print(" ");
pw.print(name);
pw.print(":</b> ");
pw.print("<b>");
pw.print(DataHelper.formatDuration(period));
pw.print(" ");
pw.print(name);
pw.print(":</b> ");
}
private final static DecimalFormat _fmt = new DecimalFormat("###,##0.00");

View File

@ -34,6 +34,7 @@ import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.JobQueue;
import net.i2p.router.NetworkDatabaseFacade;
import net.i2p.router.RouterContext;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
@ -44,7 +45,8 @@ import net.i2p.util.RandomSource;
* @author jrandom
*/
public class ClientConnectionRunner {
private final static Log _log = new Log(ClientConnectionRunner.class);
private Log _log;
private RouterContext _context;
private ClientManager _manager;
/** socket for this particular peer connection */
private Socket _socket;
@ -76,14 +78,16 @@ public class ClientConnectionRunner {
* Create a new runner against the given socket
*
*/
public ClientConnectionRunner(ClientManager manager, Socket socket) {
_manager = manager;
_socket = socket;
_config = null;
_messages = new HashMap();
_alreadyProcessed = new LinkedList();
_acceptedPending = new HashSet();
_dead = false;
public ClientConnectionRunner(RouterContext context, ClientManager manager, Socket socket) {
_context = context;
_log = _context.logManager().getLog(ClientConnectionRunner.class);
_manager = manager;
_socket = socket;
_config = null;
_messages = new HashMap();
_alreadyProcessed = new LinkedList();
_acceptedPending = new HashSet();
_dead = false;
}
/**
@ -93,35 +97,37 @@ public class ClientConnectionRunner {
*
*/
public void startRunning() {
try {
_reader = new I2CPMessageReader(_socket.getInputStream(), new ClientMessageEventListener(this));
_out = _socket.getOutputStream();
_reader.startReading();
} catch (IOException ioe) {
_log.error("Error starting up the runner", ioe);
}
try {
_reader = new I2CPMessageReader(_socket.getInputStream(), new ClientMessageEventListener(_context, this));
_out = _socket.getOutputStream();
_reader.startReading();
} catch (IOException ioe) {
_log.error("Error starting up the runner", ioe);
}
}
/** die a horrible death */
void stopRunning() {
if (_dead) return;
_log.error("Stop the I2CP connection! current leaseSet: " + _currentLeaseSet, new Exception("Stop client connection"));
_dead = true;
// we need these keys to unpublish the leaseSet
if (_reader != null) _reader.stopReading();
if (_socket != null) try { _socket.close(); } catch (IOException ioe) { }
synchronized (_messages) {
_messages.clear();
}
_manager.unregisterConnection(this);
if (_currentLeaseSet != null)
NetworkDatabaseFacade.getInstance().unpublish(_currentLeaseSet);
_leaseRequest = null;
synchronized (_alreadyProcessed) {
_alreadyProcessed.clear();
}
_config = null;
_manager = null;
if (_dead) return;
_log.error("Stop the I2CP connection! current leaseSet: "
+ _currentLeaseSet, new Exception("Stop client connection"));
_dead = true;
// we need these keys to unpublish the leaseSet
if (_reader != null) _reader.stopReading();
if (_socket != null) try { _socket.close(); } catch (IOException ioe) { }
synchronized (_messages) {
_messages.clear();
}
_manager.unregisterConnection(this);
if (_currentLeaseSet != null)
_context.netDb().unpublish(_currentLeaseSet);
_leaseRequest = null;
synchronized (_alreadyProcessed) {
_alreadyProcessed.clear();
}
_config = null;
_manager = null;
_context = null;
}
/** current client's config */
@ -144,43 +150,43 @@ public class ClientConnectionRunner {
void removePayload(MessageId id) { synchronized (_messages) { _messages.remove(id); } }
void sessionEstablished(SessionConfig config) {
_config = config;
_manager.destinationEstablished(this);
_config = config;
_manager.destinationEstablished(this);
}
void updateMessageDeliveryStatus(MessageId id, boolean delivered) {
if (_dead) return;
JobQueue.getInstance().addJob(new MessageDeliveryStatusUpdate(id, delivered));
if (_dead) return;
_context.jobQueue().addJob(new MessageDeliveryStatusUpdate(id, delivered));
}
/**
* called after a new leaseSet is granted by the client, the NetworkDb has been
* updated. This takes care of all the LeaseRequestState stuff (including firing any jobs)
*/
void leaseSetCreated(LeaseSet ls) {
if (_leaseRequest == null) {
_log.error("LeaseRequest is null and we've received a new lease?! WTF");
return;
} else {
_leaseRequest.setIsSuccessful(true);
if (_leaseRequest.getOnGranted() != null)
JobQueue.getInstance().addJob(_leaseRequest.getOnGranted());
_leaseRequest = null;
_currentLeaseSet = ls;
}
if (_leaseRequest == null) {
_log.error("LeaseRequest is null and we've received a new lease?! WTF");
return;
} else {
_leaseRequest.setIsSuccessful(true);
if (_leaseRequest.getOnGranted() != null)
_context.jobQueue().addJob(_leaseRequest.getOnGranted());
_leaseRequest = null;
_currentLeaseSet = ls;
}
}
void disconnectClient(String reason) {
_log.error("Disconnecting the client: " + reason, new Exception("Disconnecting!"));
DisconnectMessage msg = new DisconnectMessage();
msg.setReason(reason);
try {
doSend(msg);
} catch (I2CPMessageException ime) {
_log.error("Error writing out the disconnect message", ime);
} catch (IOException ioe) {
_log.error("Error writing out the disconnect message", ioe);
}
stopRunning();
_log.error("Disconnecting the client: " + reason, new Exception("Disconnecting!"));
DisconnectMessage msg = new DisconnectMessage();
msg.setReason(reason);
try {
doSend(msg);
} catch (I2CPMessageException ime) {
_log.error("Error writing out the disconnect message", ime);
} catch (IOException ioe) {
_log.error("Error writing out the disconnect message", ioe);
}
stopRunning();
}
/**
@ -190,17 +196,20 @@ public class ClientConnectionRunner {
*
*/
MessageId distributeMessage(SendMessageMessage message) {
Payload payload = message.getPayload();
Destination dest = message.getDestination();
MessageId id = new MessageId();
id.setMessageId(getNextMessageId());
synchronized (_acceptedPending) {
_acceptedPending.add(id);
}
_log.debug("** Recieving message [" + id.getMessageId() + "] with payload of size [" + payload.getSize() + "]" + " for session [" + _sessionId.getSessionId() + "]");
// the following blocks as described above
_manager.distributeMessage(_config.getDestination(), message.getDestination(), message.getPayload(), id);
return id;
Payload payload = message.getPayload();
Destination dest = message.getDestination();
MessageId id = new MessageId();
id.setMessageId(getNextMessageId());
synchronized (_acceptedPending) {
_acceptedPending.add(id);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("** Recieving message [" + id.getMessageId() + "] with payload of size ["
+ payload.getSize() + "]" + " for session [" + _sessionId.getSessionId()
+ "]");
// the following blocks as described above
_manager.distributeMessage(_config.getDestination(), message.getDestination(), message.getPayload(), id);
return id;
}
/**
@ -209,23 +218,25 @@ public class ClientConnectionRunner {
*
*/
void ackSendMessage(MessageId id, long nonce) {
_log.debug("Acking message send [accepted]" + id + " / " + nonce + " for sessionId " + _sessionId, new Exception("sendAccepted"));
MessageStatusMessage status = new MessageStatusMessage();
status.setMessageId(id);
status.setSessionId(_sessionId);
status.setSize(0L);
status.setNonce(nonce);
status.setStatus(MessageStatusMessage.STATUS_SEND_ACCEPTED);
try {
doSend(status);
synchronized (_acceptedPending) {
_acceptedPending.remove(id);
}
} catch (I2CPMessageException ime) {
_log.error("Error writing out the message status message", ime);
} catch (IOException ioe) {
_log.error("Error writing out the message status message", ioe);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Acking message send [accepted]" + id + " / " + nonce + " for sessionId "
+ _sessionId, new Exception("sendAccepted"));
MessageStatusMessage status = new MessageStatusMessage();
status.setMessageId(id);
status.setSessionId(_sessionId);
status.setSize(0L);
status.setNonce(nonce);
status.setStatus(MessageStatusMessage.STATUS_SEND_ACCEPTED);
try {
doSend(status);
synchronized (_acceptedPending) {
_acceptedPending.remove(id);
}
} catch (I2CPMessageException ime) {
_log.error("Error writing out the message status message", ime);
} catch (IOException ioe) {
_log.error("Error writing out the message status message", ioe);
}
}
/**
@ -233,8 +244,8 @@ public class ClientConnectionRunner {
*
*/
void receiveMessage(Destination toDest, Destination fromDest, Payload payload) {
if (_dead) return;
JobQueue.getInstance().addJob(new MessageReceivedJob(this, toDest, fromDest, payload));
if (_dead) return;
_context.jobQueue().addJob(new MessageReceivedJob(_context, this, toDest, fromDest, payload));
}
/**
@ -242,8 +253,8 @@ public class ClientConnectionRunner {
*
*/
public void reportAbuse(String reason, int severity) {
if (_dead) return;
JobQueue.getInstance().addJob(new ReportAbuseJob(this, reason, severity));
if (_dead) return;
_context.jobQueue().addJob(new ReportAbuseJob(_context, this, reason, severity));
}
/**
@ -259,13 +270,13 @@ public class ClientConnectionRunner {
* @param onFailedJob Job to run after the timeout passes without receiving authorization
*/
void requestLeaseSet(LeaseSet set, long expirationTime, Job onCreateJob, Job onFailedJob) {
if (_dead) return;
JobQueue.getInstance().addJob(new RequestLeaseSetJob(this, set, expirationTime, onCreateJob, onFailedJob));
if (_dead) return;
_context.jobQueue().addJob(new RequestLeaseSetJob(_context, this, set, expirationTime, onCreateJob, onFailedJob));
}
void disconnected() {
_log.error("Disconnected", new Exception("Disconnected?"));
stopRunning();
_log.error("Disconnected", new Exception("Disconnected?"));
stopRunning();
}
////
@ -276,29 +287,30 @@ public class ClientConnectionRunner {
*
*/
void doSend(I2CPMessage msg) throws I2CPMessageException, IOException {
if (_out == null) throw new I2CPMessageException("Output stream is not initialized");
long before = Clock.getInstance().now();
try {
synchronized (_out) {
msg.writeMessage(_out);
_out.flush();
}
} catch (I2CPMessageException ime) {
_log.error("Message exception sending I2CP message", ime);
throw ime;
} catch (IOException ioe) {
_log.error("IO exception sending I2CP message", ioe);
throw ioe;
} catch (Throwable t) {
_log.log(Log.CRIT, "Unhandled exception sending I2CP message", t);
throw new IOException("Unhandled exception sending I2CP message: " + t.getMessage());
} finally {
long after = Clock.getInstance().now();
long lag = after - before;
if (lag > 300) {
_log.error("synchronization on the i2cp message send took too long (" + lag + "ms): " + msg, new Exception("I2CP Lag"));
}
}
if (_out == null) throw new I2CPMessageException("Output stream is not initialized");
long before = _context.clock().now();
try {
synchronized (_out) {
msg.writeMessage(_out);
_out.flush();
}
} catch (I2CPMessageException ime) {
_log.error("Message exception sending I2CP message", ime);
throw ime;
} catch (IOException ioe) {
_log.error("IO exception sending I2CP message", ioe);
throw ioe;
} catch (Throwable t) {
_log.log(Log.CRIT, "Unhandled exception sending I2CP message", t);
throw new IOException("Unhandled exception sending I2CP message: " + t.getMessage());
} finally {
long after = _context.clock().now();
long lag = after - before;
if (lag > 300) {
_log.error("synchronization on the i2cp message send took too long (" + lag
+ "ms): " + msg, new Exception("I2CP Lag"));
}
}
}
// this *should* be mod 65536, but UnsignedInteger is still b0rked. FIXME
@ -307,12 +319,12 @@ public class ClientConnectionRunner {
private static Object _messageIdLock = new Object();
static int getNextMessageId() {
synchronized (_messageIdLock) {
int messageId = (++_messageId)%MAX_MESSAGE_ID;
if (_messageId >= MAX_MESSAGE_ID)
_messageId = 0;
return messageId;
}
synchronized (_messageIdLock) {
int messageId = (++_messageId)%MAX_MESSAGE_ID;
if (_messageId >= MAX_MESSAGE_ID)
_messageId = 0;
return messageId;
}
}
/**
@ -321,20 +333,20 @@ public class ClientConnectionRunner {
*
*/
private boolean alreadyAccepted(MessageId id) {
if (_dead) return false;
boolean isPending = false;
int pending = 0;
String buf = null;
synchronized (_acceptedPending) {
if (_acceptedPending.contains(id))
isPending = true;
pending = _acceptedPending.size();
buf = _acceptedPending.toString();
}
if (pending >= 1) {
_log.warn("Pending acks: " + pending + ": " + buf);
}
return !isPending;
if (_dead) return false;
boolean isPending = false;
int pending = 0;
String buf = null;
synchronized (_acceptedPending) {
if (_acceptedPending.contains(id))
isPending = true;
pending = _acceptedPending.size();
buf = _acceptedPending.toString();
}
if (pending >= 1) {
_log.warn("Pending acks: " + pending + ": " + buf);
}
return !isPending;
}
/**
@ -346,59 +358,73 @@ public class ClientConnectionRunner {
private final static long REQUEUE_DELAY = 500;
private class MessageDeliveryStatusUpdate extends JobImpl {
private MessageId _messageId;
private boolean _success;
private long _lastTried;
public MessageDeliveryStatusUpdate(MessageId id, boolean success) {
_messageId = id;
_success = success;
_lastTried = 0;
}
public String getName() { return "Update Delivery Status"; }
public void runJob() {
if (_dead) return;
MessageStatusMessage msg = new MessageStatusMessage();
msg.setMessageId(_messageId);
msg.setSessionId(_sessionId);
msg.setNonce(2);
msg.setSize(0);
if (_success)
msg.setStatus(MessageStatusMessage.STATUS_SEND_GUARANTEED_SUCCESS);
else
msg.setStatus(MessageStatusMessage.STATUS_SEND_GUARANTEED_FAILURE);
private MessageId _messageId;
private boolean _success;
private long _lastTried;
public MessageDeliveryStatusUpdate(MessageId id, boolean success) {
super(ClientConnectionRunner.this._context);
_messageId = id;
_success = success;
_lastTried = 0;
}
if (!alreadyAccepted(_messageId)) {
_log.warn("Almost send an update for message " + _messageId + " to " + MessageStatusMessage.getStatusString(msg.getStatus()) + " for session [" + _sessionId.getSessionId() + "] before they knew the messageId! delaying .5s");
_lastTried = Clock.getInstance().now();
requeue(REQUEUE_DELAY);
return;
}
public String getName() { return "Update Delivery Status"; }
public void runJob() {
if (_dead) return;
synchronized (_alreadyProcessed) {
if (_alreadyProcessed.contains(_messageId)) {
_log.warn("Status already updated");
return;
} else {
_alreadyProcessed.add(_messageId);
while (_alreadyProcessed.size() > 10)
_alreadyProcessed.remove(0);
}
}
if (_lastTried > 0)
_log.info("Updating message status for message " + _messageId + " to " + MessageStatusMessage.getStatusString(msg.getStatus()) + " for session [" + _sessionId.getSessionId() + "] (with nonce=2), retrying after [" + (Clock.getInstance().now() - _lastTried) + "]", getAddedBy());
else
_log.debug("Updating message status for message " + _messageId + " to " + MessageStatusMessage.getStatusString(msg.getStatus()) + " for session [" + _sessionId.getSessionId() + "] (with nonce=2)");
MessageStatusMessage msg = new MessageStatusMessage();
msg.setMessageId(_messageId);
msg.setSessionId(_sessionId);
msg.setNonce(2);
msg.setSize(0);
if (_success)
msg.setStatus(MessageStatusMessage.STATUS_SEND_GUARANTEED_SUCCESS);
else
msg.setStatus(MessageStatusMessage.STATUS_SEND_GUARANTEED_FAILURE);
try {
doSend(msg);
} catch (I2CPMessageException ime) {
_log.warn("Error updating the status for message ID " + _messageId, ime);
} catch (IOException ioe) {
_log.warn("Error updating the status for message ID " + _messageId, ioe);
}
}
if (!alreadyAccepted(_messageId)) {
_log.warn("Almost send an update for message " + _messageId + " to "
+ MessageStatusMessage.getStatusString(msg.getStatus())
+ " for session [" + _sessionId.getSessionId()
+ "] before they knew the messageId! delaying .5s");
_lastTried = ClientConnectionRunner.this._context.clock().now();
requeue(REQUEUE_DELAY);
return;
}
synchronized (_alreadyProcessed) {
if (_alreadyProcessed.contains(_messageId)) {
_log.warn("Status already updated");
return;
} else {
_alreadyProcessed.add(_messageId);
while (_alreadyProcessed.size() > 10)
_alreadyProcessed.remove(0);
}
}
if (_lastTried > 0) {
if (_log.shouldLog(Log.DEBUG))
_log.info("Updating message status for message " + _messageId + " to "
+ MessageStatusMessage.getStatusString(msg.getStatus())
+ " for session [" + _sessionId.getSessionId()
+ "] (with nonce=2), retrying after ["
+ (ClientConnectionRunner.this._context.clock().now() - _lastTried)
+ "]", getAddedBy());
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Updating message status for message " + _messageId + " to "
+ MessageStatusMessage.getStatusString(msg.getStatus())
+ " for session [" + _sessionId.getSessionId() + "] (with nonce=2)");
}
try {
doSend(msg);
} catch (I2CPMessageException ime) {
_log.warn("Error updating the status for message ID " + _messageId, ime);
} catch (IOException ioe) {
_log.warn("Error updating the status for message ID " + _messageId, ioe);
}
}
}
}

View File

@ -14,6 +14,7 @@ import java.net.Socket;
import net.i2p.client.I2PClient;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Listen for connections on the specified port, and toss them onto the client manager's
@ -22,17 +23,20 @@ import net.i2p.util.Log;
* @author jrandom
*/
public class ClientListenerRunner implements Runnable {
private final static Log _log = new Log(ClientListenerRunner.class);
private Log _log;
private RouterContext _context;
private ClientManager _manager;
private ServerSocket _socket;
private int _port;
private boolean _running;
private long _nextFailDelay = 1000;
public ClientListenerRunner(ClientManager manager, int port) {
_manager = manager;
_port = port;
_running = false;
public ClientListenerRunner(RouterContext context, ClientManager manager, int port) {
_context = context;
_log = _context.logManager().getLog(ClientListenerRunner.class);
_manager = manager;
_port = port;
_running = false;
}
public void setPort(int port) { _port = port; }
@ -49,83 +53,83 @@ public class ClientListenerRunner implements Runnable {
*
*/
public void runServer() {
_running = true;
int curDelay = 0;
while ( (_running) && (curDelay < MAX_FAIL_DELAY) ) {
try {
_log.info("Starting up listening for connections on port " + _port);
_socket = new ServerSocket(_port);
curDelay = 0;
while (_running) {
try {
Socket socket = _socket.accept();
if (validate(socket)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Connection received");
runConnection(socket);
} else {
socket.close();
if (_log.shouldLog(Log.WARN))
_log.warn("Refused connection from " + socket.getInetAddress().toString());
}
} catch (IOException ioe) {
_log.error("Server error accepting", ioe);
} catch (Throwable t) {
_log.error("Fatal error running client listener - killing the thread!", t);
return;
}
}
} catch (IOException ioe) {
_log.error("Error listening on port " + _port, ioe);
}
if (_socket != null) {
try { _socket.close(); } catch (IOException ioe) {}
_socket = null;
}
_log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again");
try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {}
curDelay += _nextFailDelay;
_nextFailDelay *= 5;
}
_log.error("CANCELING I2CP LISTEN. delay = " + curDelay, new Exception("I2CP Listen cancelled!!!"));
_running = false;
_running = true;
int curDelay = 0;
while ( (_running) && (curDelay < MAX_FAIL_DELAY) ) {
try {
_log.info("Starting up listening for connections on port " + _port);
_socket = new ServerSocket(_port);
curDelay = 0;
while (_running) {
try {
Socket socket = _socket.accept();
if (validate(socket)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Connection received");
runConnection(socket);
} else {
socket.close();
if (_log.shouldLog(Log.WARN))
_log.warn("Refused connection from " + socket.getInetAddress());
}
} catch (IOException ioe) {
_log.error("Server error accepting", ioe);
} catch (Throwable t) {
_log.error("Fatal error running client listener - killing the thread!", t);
return;
}
}
} catch (IOException ioe) {
_log.error("Error listening on port " + _port, ioe);
}
if (_socket != null) {
try { _socket.close(); } catch (IOException ioe) {}
_socket = null;
}
_log.error("Error listening, waiting " + _nextFailDelay + "ms before we try again");
try { Thread.sleep(_nextFailDelay); } catch (InterruptedException ie) {}
curDelay += _nextFailDelay;
_nextFailDelay *= 5;
}
_log.error("CANCELING I2CP LISTEN. delay = " + curDelay, new Exception("I2CP Listen cancelled!!!"));
_running = false;
}
/** give the i2cp client 5 seconds to show that they're really i2cp clients */
private final static int CONNECT_TIMEOUT = 5*1000;
private boolean validate(Socket socket) {
try {
socket.setSoTimeout(CONNECT_TIMEOUT);
int read = socket.getInputStream().read();
if (read != I2PClient.PROTOCOL_BYTE)
return false;
socket.setSoTimeout(0);
return true;
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Peer did not authenticate themselves as I2CP quickly enough, dropping");
return false;
}
try {
socket.setSoTimeout(CONNECT_TIMEOUT);
int read = socket.getInputStream().read();
if (read != I2PClient.PROTOCOL_BYTE)
return false;
socket.setSoTimeout(0);
return true;
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Peer did not authenticate themselves as I2CP quickly enough, dropping");
return false;
}
}
/**
* Handle the connection by passing it off to a {@link ClientConnectionRunner ClientConnectionRunner}
*
*/
protected void runConnection(Socket socket) throws IOException {
ClientConnectionRunner runner = new ClientConnectionRunner(_manager, socket);
_manager.registerConnection(runner);
ClientConnectionRunner runner = new ClientConnectionRunner(_context, _manager, socket);
_manager.registerConnection(runner);
}
public void stopListening() {
_running = false;
if (_socket != null) try {
_socket.close();
_socket = null;
} catch (IOException ioe) {}
_running = false;
if (_socket != null) try {
_socket.close();
_socket = null;
} catch (IOException ioe) {}
}
public void run() { runServer(); }
}

View File

@ -29,6 +29,7 @@ import net.i2p.stat.StatManager;
import net.i2p.util.Clock;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Coordinate connections and various tasks
@ -36,108 +37,109 @@ import net.i2p.util.Log;
* @author jrandom
*/
public class ClientManager {
private final static Log _log = new Log(ClientManager.class);
private Log _log;
private ClientListenerRunner _listener;
private HashMap _runners; // Destination --> ClientConnectionRunner
private Set _pendingRunners; // ClientConnectionRunner for clients w/out a Dest yet
private RouterContext _context;
/** ms to wait before rechecking for inbound messages to deliver to clients */
private final static int INBOUND_POLL_INTERVAL = 300;
static {
StatManager.getInstance().createRateStat("client.receiveMessageSize", "How large are messages received by the client?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
}
public ClientManager(int port) {
_runners = new HashMap();
_pendingRunners = new HashSet();
_listener = new ClientListenerRunner(this, port);
Thread t = new I2PThread(_listener);
t.setName("ClientListener");
t.setDaemon(true);
t.start();
//JobQueue.getInstance().addJob(new CheckInboundMessagesJob());
public ClientManager(RouterContext context, int port) {
_context = context;
_log = context.logManager().getLog(ClientManager.class);
_context.statManager().createRateStat("client.receiveMessageSize",
"How large are messages received by the client?",
"Client Messages",
new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_runners = new HashMap();
_pendingRunners = new HashSet();
_listener = new ClientListenerRunner(_context, this, port);
Thread t = new I2PThread(_listener);
t.setName("ClientListener");
t.setDaemon(true);
t.start();
}
public void shutdown() {
_log.info("Shutting down the ClientManager");
_listener.stopListening();
Set runners = new HashSet();
synchronized (_runners) {
for (Iterator iter = _runners.values().iterator(); iter.hasNext();) {
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
runners.add(runner);
}
}
synchronized (_pendingRunners) {
for (Iterator iter = _pendingRunners.iterator(); iter.hasNext();) {
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
runners.add(runner);
}
}
for (Iterator iter = runners.iterator(); iter.hasNext(); ) {
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
runner.stopRunning();
}
_log.info("Shutting down the ClientManager");
_listener.stopListening();
Set runners = new HashSet();
synchronized (_runners) {
for (Iterator iter = _runners.values().iterator(); iter.hasNext();) {
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
runners.add(runner);
}
}
synchronized (_pendingRunners) {
for (Iterator iter = _pendingRunners.iterator(); iter.hasNext();) {
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
runners.add(runner);
}
}
for (Iterator iter = runners.iterator(); iter.hasNext(); ) {
ClientConnectionRunner runner = (ClientConnectionRunner)iter.next();
runner.stopRunning();
}
}
public void registerConnection(ClientConnectionRunner runner) {
synchronized (_pendingRunners) {
_pendingRunners.add(runner);
}
runner.startRunning();
synchronized (_pendingRunners) {
_pendingRunners.add(runner);
}
runner.startRunning();
}
public void unregisterConnection(ClientConnectionRunner runner) {
_log.warn("Unregistering (dropping) a client connection");
synchronized (_pendingRunners) {
_pendingRunners.remove(runner);
}
if ( (runner.getConfig() != null) && (runner.getConfig().getDestination() != null) ) {
// after connection establishment
synchronized (_runners) {
_runners.remove(runner.getConfig().getDestination());
}
}
_log.warn("Unregistering (dropping) a client connection");
synchronized (_pendingRunners) {
_pendingRunners.remove(runner);
}
if ( (runner.getConfig() != null) && (runner.getConfig().getDestination() != null) ) {
// after connection establishment
synchronized (_runners) {
_runners.remove(runner.getConfig().getDestination());
}
}
}
public void destinationEstablished(ClientConnectionRunner runner) {
synchronized (_pendingRunners) {
_pendingRunners.remove(runner);
}
synchronized (_runners) {
_runners.put(runner.getConfig().getDestination(), runner);
}
synchronized (_pendingRunners) {
_pendingRunners.remove(runner);
}
synchronized (_runners) {
_runners.put(runner.getConfig().getDestination(), runner);
}
}
void distributeMessage(Destination fromDest, Destination toDest, Payload payload, MessageId msgId) {
// check if there is a runner for it
ClientConnectionRunner runner = getRunner(toDest);
if (runner != null) {
_log.debug("Message " + msgId + " is targeting a local destination. distribute it as such");
runner.receiveMessage(toDest, fromDest, payload);
if (fromDest != null) {
ClientConnectionRunner sender = getRunner(fromDest);
if (sender != null) {
sender.updateMessageDeliveryStatus(msgId, true);
} else {
_log.log(Log.CRIT, "Um, wtf, we're sending a local message, but we can't find who sent it?", new Exception("wtf"));
}
}
} else {
// remote. w00t
_log.debug("Message " + msgId + " is targeting a REMOTE destination! Added to the client message pool");
runner = getRunner(fromDest);
ClientMessage msg = new ClientMessage();
msg.setDestination(toDest);
msg.setPayload(payload);
msg.setReceptionInfo(null);
msg.setSenderConfig(runner.getConfig());
msg.setFromDestination(runner.getConfig().getDestination());
msg.setMessageId(msgId);
ClientMessagePool.getInstance().add(msg);
}
// check if there is a runner for it
ClientConnectionRunner runner = getRunner(toDest);
if (runner != null) {
_log.debug("Message " + msgId + " is targeting a local destination. distribute it as such");
runner.receiveMessage(toDest, fromDest, payload);
if (fromDest != null) {
ClientConnectionRunner sender = getRunner(fromDest);
if (sender != null) {
sender.updateMessageDeliveryStatus(msgId, true);
} else {
_log.log(Log.CRIT, "Um, wtf, we're sending a local message, but we can't find who sent it?", new Exception("wtf"));
}
}
} else {
// remote. w00t
_log.debug("Message " + msgId + " is targeting a REMOTE destination! Added to the client message pool");
runner = getRunner(fromDest);
ClientMessage msg = new ClientMessage();
msg.setDestination(toDest);
msg.setPayload(payload);
msg.setReceptionInfo(null);
msg.setSenderConfig(runner.getConfig());
msg.setFromDestination(runner.getConfig().getDestination());
msg.setMessageId(msgId);
_context.clientMessagePool().add(msg);
}
}
@ -155,39 +157,40 @@ public class ClientManager {
* @param onFailedJob Job to run after the timeout passes without receiving authorization
*/
public void requestLeaseSet(Destination dest, LeaseSet set, long timeout, Job onCreateJob, Job onFailedJob) {
ClientConnectionRunner runner = getRunner(dest);
if (runner == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Cannot request the lease set, as we can't find a client runner for " + dest.calculateHash().toBase64() + ". disconnected?");
JobQueue.getInstance().addJob(onFailedJob);
} else {
runner.requestLeaseSet(set, Clock.getInstance().now() + timeout, onCreateJob, onFailedJob);
}
ClientConnectionRunner runner = getRunner(dest);
if (runner == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Cannot request the lease set, as we can't find a client runner for "
+ dest.calculateHash().toBase64() + ". disconnected?");
_context.jobQueue().addJob(onFailedJob);
} else {
runner.requestLeaseSet(set, _context.clock().now() + timeout, onCreateJob, onFailedJob);
}
}
public boolean isLocal(Destination dest) {
synchronized (_runners) {
return (_runners.containsKey(dest));
}
synchronized (_runners) {
return (_runners.containsKey(dest));
}
}
public boolean isLocal(Hash destHash) {
if (destHash == null) return false;
Set dests = new HashSet();
synchronized (_runners) {
dests.addAll(_runners.keySet());
}
for (Iterator iter = dests.iterator(); iter.hasNext();) {
Destination d = (Destination)iter.next();
if (d.calculateHash().equals(destHash)) return true;
}
return false;
if (destHash == null) return false;
Set dests = new HashSet();
synchronized (_runners) {
dests.addAll(_runners.keySet());
}
for (Iterator iter = dests.iterator(); iter.hasNext();) {
Destination d = (Destination)iter.next();
if (d.calculateHash().equals(destHash)) return true;
}
return false;
}
private ClientConnectionRunner getRunner(Destination dest) {
synchronized (_runners) {
return (ClientConnectionRunner)_runners.get(dest);
}
synchronized (_runners) {
return (ClientConnectionRunner)_runners.get(dest);
}
}
/**
@ -195,111 +198,118 @@ public class ClientManager {
*
*/
public SessionConfig getClientSessionConfig(Destination dest) {
ClientConnectionRunner runner = getRunner(dest);
if (runner != null)
return runner.getConfig();
else
return null;
ClientConnectionRunner runner = getRunner(dest);
if (runner != null)
return runner.getConfig();
else
return null;
}
private ClientConnectionRunner getRunner(Hash destHash) {
if (destHash == null)
return null;
Set dests = new HashSet();
synchronized (_runners) {
dests.addAll(_runners.keySet());
}
for (Iterator iter = dests.iterator(); iter.hasNext(); ) {
Destination d = (Destination)iter.next();
if (d.calculateHash().equals(destHash))
return getRunner(d);
}
return null;
if (destHash == null)
return null;
Set dests = new HashSet();
synchronized (_runners) {
dests.addAll(_runners.keySet());
}
for (Iterator iter = dests.iterator(); iter.hasNext(); ) {
Destination d = (Destination)iter.next();
if (d.calculateHash().equals(destHash))
return getRunner(d);
}
return null;
}
public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) {
ClientConnectionRunner runner = getRunner(fromDest);
if (runner != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Delivering status [" + (delivered?"success":"failure") + "] to " + fromDest.calculateHash().toBase64() + " for message " + id);
runner.updateMessageDeliveryStatus(id, delivered);
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Cannot deliver status [" + (delivered?"success":"failure") + "] to " + fromDest.calculateHash().toBase64() + " for message " + id);
}
ClientConnectionRunner runner = getRunner(fromDest);
if (runner != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Delivering status [" + (delivered?"success":"failure") + "] to "
+ fromDest.calculateHash().toBase64() + " for message " + id);
runner.updateMessageDeliveryStatus(id, delivered);
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Cannot deliver status [" + (delivered?"success":"failure") + "] to "
+ fromDest.calculateHash().toBase64() + " for message " + id);
}
}
private Set getRunnerDestinations() {
Set dests = new HashSet();
synchronized (_runners) {
dests.addAll(_runners.keySet());
}
return dests;
Set dests = new HashSet();
synchronized (_runners) {
dests.addAll(_runners.keySet());
}
return dests;
}
public void reportAbuse(Destination dest, String reason, int severity) {
if (dest != null) {
ClientConnectionRunner runner = getRunner(dest);
if (runner != null) {
runner.reportAbuse(reason, severity);
}
} else {
Set dests = getRunnerDestinations();
for (Iterator iter = dests.iterator(); iter.hasNext(); ) {
Destination d = (Destination)iter.next();
reportAbuse(d, reason, severity);
}
}
if (dest != null) {
ClientConnectionRunner runner = getRunner(dest);
if (runner != null) {
runner.reportAbuse(reason, severity);
}
} else {
Set dests = getRunnerDestinations();
for (Iterator iter = dests.iterator(); iter.hasNext(); ) {
Destination d = (Destination)iter.next();
reportAbuse(d, reason, severity);
}
}
}
public String renderStatusHTML() {
StringBuffer buf = new StringBuffer();
buf.append("<h2>Clients</h2><ul>");
Map runners = null;
synchronized (_runners) {
runners = (Map)_runners.clone();
}
for (Iterator iter = runners.keySet().iterator(); iter.hasNext(); ) {
Destination dest = (Destination)iter.next();
ClientConnectionRunner runner = (ClientConnectionRunner)runners.get(dest);
buf.append("<li>").append(dest.calculateHash().toBase64()).append("</li>\n");
// toss out some general warnings
if (runner.getLeaseSet() == null)
buf.append("<font color=\"red\"><b>No leases! If you didn't just start a client, please restart it (and perhaps check your router's logs for ERROR messages)</b></font><br />\n");
else if (runner.getLeaseSet().getEarliestLeaseDate() < Clock.getInstance().now())
buf.append("<font color=\"red\"><b>wtf, lease has already expired! please restart your client</b></font><br />\n");
buf.append("<pre>\n");
buf.append(runner.getLeaseSet()).append("</pre>\n");
}
buf.append("</ul>\n");
return buf.toString();
StringBuffer buf = new StringBuffer();
buf.append("<h2>Clients</h2><ul>");
Map runners = null;
synchronized (_runners) {
runners = (Map)_runners.clone();
}
for (Iterator iter = runners.keySet().iterator(); iter.hasNext(); ) {
Destination dest = (Destination)iter.next();
ClientConnectionRunner runner = (ClientConnectionRunner)runners.get(dest);
buf.append("<li>").append(dest.calculateHash().toBase64()).append("</li>\n");
// toss out some general warnings
if (runner.getLeaseSet() == null)
buf.append("<font color=\"red\"><b>No leases! If you didn't just start a client, please restart it (and perhaps check your router's logs for ERROR messages)</b></font><br />\n");
else if (runner.getLeaseSet().getEarliestLeaseDate() < _context.clock().now())
buf.append("<font color=\"red\"><b>wtf, lease has already expired! please wait a minute, and if this message remains, restart your client</b></font><br />\n");
buf.append("<pre>\n");
buf.append(runner.getLeaseSet()).append("</pre>\n");
}
buf.append("</ul>\n");
return buf.toString();
}
public void messageReceived(ClientMessage msg) {
JobQueue.getInstance().addJob(new HandleJob(msg));
_context.jobQueue().addJob(new HandleJob(msg));
}
private class HandleJob extends JobImpl {
private ClientMessage _msg;
public HandleJob(ClientMessage msg) {
_msg = msg;
}
public String getName() { return "Handle Inbound Client Messages"; }
public void runJob() {
ClientConnectionRunner runner = null;
if (_msg.getDestination() != null)
runner = getRunner(_msg.getDestination());
else
runner = getRunner(_msg.getDestinationHash());
if (runner != null) {
StatManager.getInstance().addRateData("client.receiveMessageSize", _msg.getPayload().getSize(), 0);
runner.receiveMessage(_msg.getDestination(), null, _msg.getPayload());
} else {
// no client connection...
// we should pool these somewhere...
_log.warn("Message received but we don't have a connection to " + _msg.getDestination() + "/" + _msg.getDestinationHash() + " currently. DROPPED");
}
}
private ClientMessage _msg;
public HandleJob(ClientMessage msg) {
super(ClientManager.this._context);
_msg = msg;
}
public String getName() { return "Handle Inbound Client Messages"; }
public void runJob() {
ClientConnectionRunner runner = null;
if (_msg.getDestination() != null)
runner = getRunner(_msg.getDestination());
else
runner = getRunner(_msg.getDestinationHash());
if (runner != null) {
HandleJob.this._context.statManager().addRateData("client.receiveMessageSize",
_msg.getPayload().getSize(), 0);
runner.receiveMessage(_msg.getDestination(), null, _msg.getPayload());
} else {
// no client connection...
// we should pool these somewhere...
if (_log.shouldLog(Log.WARN))
_log.warn("Message received but we don't have a connection to "
+ _msg.getDestination() + "/" + _msg.getDestinationHash()
+ " currently. DROPPED");
}
}
}
}

View File

@ -17,6 +17,7 @@ import net.i2p.router.ClientManagerFacade;
import net.i2p.router.ClientMessage;
import net.i2p.router.Job;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
/**
@ -27,33 +28,35 @@ import net.i2p.util.Log;
public class ClientManagerFacadeImpl extends ClientManagerFacade {
private final static Log _log = new Log(ClientManagerFacadeImpl.class);
private ClientManager _manager;
private RouterContext _context;
public final static String PROP_CLIENT_PORT = "i2cp.port";
public final static int DEFAULT_PORT = 7654;
public ClientManagerFacadeImpl() {
_manager = null;
_log.debug("Client manager facade created");
public ClientManagerFacadeImpl(RouterContext context) {
_context = context;
_manager = null;
_log.debug("Client manager facade created");
}
public void startup() {
_log.info("Starting up the client subsystem");
String portStr = Router.getInstance().getConfigSetting(PROP_CLIENT_PORT);
if (portStr != null) {
try {
int port = Integer.parseInt(portStr);
_manager = new ClientManager(port);
} catch (NumberFormatException nfe) {
_log.error("Error setting the port: " + portStr + " is not valid", nfe);
_manager = new ClientManager(DEFAULT_PORT);
}
} else {
_manager = new ClientManager(DEFAULT_PORT);
}
_log.info("Starting up the client subsystem");
String portStr = _context.router().getConfigSetting(PROP_CLIENT_PORT);
if (portStr != null) {
try {
int port = Integer.parseInt(portStr);
_manager = new ClientManager(_context, port);
} catch (NumberFormatException nfe) {
_log.error("Error setting the port: " + portStr + " is not valid", nfe);
_manager = new ClientManager(_context, DEFAULT_PORT);
}
} else {
_manager = new ClientManager(_context, DEFAULT_PORT);
}
}
public void shutdown() {
if (_manager != null)
_manager.shutdown();
if (_manager != null)
_manager.shutdown();
}
/**
@ -70,10 +73,10 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
* @param onFailedJob Job to run after the timeout passes without receiving authorization
*/
public void requestLeaseSet(Destination dest, LeaseSet set, long timeout, Job onCreateJob, Job onFailedJob) {
if (_manager != null)
_manager.requestLeaseSet(dest, set, timeout, onCreateJob, onFailedJob);
else
_log.error("Null manager on requestLeaseSet!");
if (_manager != null)
_manager.requestLeaseSet(dest, set, timeout, onCreateJob, onFailedJob);
else
_log.error("Null manager on requestLeaseSet!");
}
/**
@ -85,10 +88,10 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
* @param severity How severe the abuse is, with 0 being not severe and 255 is the max
*/
public void reportAbuse(Destination dest, String reason, int severity) {
if (_manager != null)
_manager.reportAbuse(dest, reason, severity);
else
_log.error("Null manager on reportAbuse!");
if (_manager != null)
_manager.reportAbuse(dest, reason, severity);
else
_log.error("Null manager on reportAbuse!");
}
/**
* Determine if the destination specified is managed locally. This call
@ -97,12 +100,12 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
* @param dest Destination to be checked
*/
public boolean isLocal(Destination dest) {
if (_manager != null)
return _manager.isLocal(dest);
else {
_log.debug("Null manager on isLocal(dest)!");
return false;
}
if (_manager != null)
return _manager.isLocal(dest);
else {
_log.debug("Null manager on isLocal(dest)!");
return false;
}
}
/**
* Determine if the destination specified is managed locally. This call
@ -111,26 +114,26 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
* @param destHash Hash of Destination to be checked
*/
public boolean isLocal(Hash destHash) {
if (_manager != null)
return _manager.isLocal(destHash);
else {
_log.debug("Null manager on isLocal(hash)!");
return false;
}
if (_manager != null)
return _manager.isLocal(destHash);
else {
_log.debug("Null manager on isLocal(hash)!");
return false;
}
}
public void messageDeliveryStatusUpdate(Destination fromDest, MessageId id, boolean delivered) {
if (_manager != null)
_manager.messageDeliveryStatusUpdate(fromDest, id, delivered);
else
_log.error("Null manager on messageDeliveryStatusUpdate!");
if (_manager != null)
_manager.messageDeliveryStatusUpdate(fromDest, id, delivered);
else
_log.error("Null manager on messageDeliveryStatusUpdate!");
}
public void messageReceived(ClientMessage msg) {
if (_manager != null)
_manager.messageReceived(msg);
else
_log.error("Null manager on messageReceived!");
if (_manager != null)
_manager.messageReceived(msg);
else
_log.error("Null manager on messageReceived!");
}
/**
@ -138,20 +141,20 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade {
*
*/
public SessionConfig getClientSessionConfig(Destination dest) {
if (_manager != null)
return _manager.getClientSessionConfig(dest);
else {
_log.error("Null manager on getClientSessionConfig!");
return null;
}
if (_manager != null)
return _manager.getClientSessionConfig(dest);
else {
_log.error("Null manager on getClientSessionConfig!");
return null;
}
}
public String renderStatusHTML() {
if (_manager != null)
return _manager.renderStatusHTML();
else {
_log.error("Null manager on renderStatusHTML!");
return null;
}
if (_manager != null)
return _manager.renderStatusHTML();
else {
_log.error("Null manager on renderStatusHTML!");
return null;
}
}
}

View File

@ -32,6 +32,7 @@ import net.i2p.router.NetworkDatabaseFacade;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.router.RouterContext;
/**
* Receive events from the client and handle them accordingly (updating the runner when
@ -39,11 +40,14 @@ import net.i2p.util.RandomSource;
*
*/
class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventListener {
private static final Log _log = new Log(ClientMessageEventListener.class);
private Log _log;
private RouterContext _context;
private ClientConnectionRunner _runner;
public ClientMessageEventListener(ClientConnectionRunner runner) {
_runner = runner;
public ClientMessageEventListener(RouterContext context, ClientConnectionRunner runner) {
_context = context;
_log = _context.logManager().getLog(ClientMessageEventListener.class);
_runner = runner;
}
/**
@ -51,36 +55,37 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
*
*/
public void messageReceived(I2CPMessageReader reader, I2CPMessage message) {
if (_runner.isDead()) return;
_log.info("Message recieved: \n" + message);
switch (message.getType()) {
case GetDateMessage.MESSAGE_TYPE:
handleGetDate(reader, (GetDateMessage)message);
break;
case SetDateMessage.MESSAGE_TYPE:
handleSetDate(reader, (SetDateMessage)message);
break;
case CreateSessionMessage.MESSAGE_TYPE:
handleCreateSession(reader, (CreateSessionMessage)message);
break;
case SendMessageMessage.MESSAGE_TYPE:
handleSendMessage(reader, (SendMessageMessage)message);
break;
case ReceiveMessageBeginMessage.MESSAGE_TYPE:
handleReceiveBegin(reader, (ReceiveMessageBeginMessage)message);
break;
case ReceiveMessageEndMessage.MESSAGE_TYPE:
handleReceiveEnd(reader, (ReceiveMessageEndMessage)message);
break;
case CreateLeaseSetMessage.MESSAGE_TYPE:
handleCreateLeaseSet(reader, (CreateLeaseSetMessage)message);
break;
case DestroySessionMessage.MESSAGE_TYPE:
handleDestroySession(reader, (DestroySessionMessage)message);
break;
default:
_log.warn("Unhandled I2CP type received: " + message.getType());
}
if (_runner.isDead()) return;
if (_log.shouldLog(Log.INFO))
_log.info("Message recieved: \n" + message);
switch (message.getType()) {
case GetDateMessage.MESSAGE_TYPE:
handleGetDate(reader, (GetDateMessage)message);
break;
case SetDateMessage.MESSAGE_TYPE:
handleSetDate(reader, (SetDateMessage)message);
break;
case CreateSessionMessage.MESSAGE_TYPE:
handleCreateSession(reader, (CreateSessionMessage)message);
break;
case SendMessageMessage.MESSAGE_TYPE:
handleSendMessage(reader, (SendMessageMessage)message);
break;
case ReceiveMessageBeginMessage.MESSAGE_TYPE:
handleReceiveBegin(reader, (ReceiveMessageBeginMessage)message);
break;
case ReceiveMessageEndMessage.MESSAGE_TYPE:
handleReceiveEnd(reader, (ReceiveMessageEndMessage)message);
break;
case CreateLeaseSetMessage.MESSAGE_TYPE:
handleCreateLeaseSet(reader, (CreateLeaseSetMessage)message);
break;
case DestroySessionMessage.MESSAGE_TYPE:
handleDestroySession(reader, (DestroySessionMessage)message);
break;
default:
_log.warn("Unhandled I2CP type received: " + message.getType());
}
}
/**
@ -88,27 +93,27 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
*
*/
public void readError(I2CPMessageReader reader, Exception error) {
if (_runner.isDead()) return;
_log.error("Error occurred", error);
_runner.stopRunning();
if (_runner.isDead()) return;
_log.error("Error occurred", error);
_runner.stopRunning();
}
public void disconnected(I2CPMessageReader reader) {
if (_runner.isDead()) return;
_runner.disconnected();
if (_runner.isDead()) return;
_runner.disconnected();
}
private void handleGetDate(I2CPMessageReader reader, GetDateMessage message) {
try {
_runner.doSend(new SetDateMessage());
} catch (I2CPMessageException ime) {
_log.error("Error writing out the setDate message", ime);
} catch (IOException ioe) {
_log.error("Error writing out the setDate message", ioe);
}
try {
_runner.doSend(new SetDateMessage());
} catch (I2CPMessageException ime) {
_log.error("Error writing out the setDate message", ime);
} catch (IOException ioe) {
_log.error("Error writing out the setDate message", ioe);
}
}
private void handleSetDate(I2CPMessageReader reader, SetDateMessage message) {
Clock.getInstance().setNow(message.getDate().getTime());
_context.clock().setNow(message.getDate().getTime());
}
@ -117,30 +122,30 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
*
*/
private void handleCreateSession(I2CPMessageReader reader, CreateSessionMessage message) {
if (message.getSessionConfig().verifySignature()) {
_log.debug("Signature verified correctly on create session message");
} else {
_log.error("Signature verification *FAILED* on a create session message. Hijack attempt?");
_runner.disconnectClient("Invalid signature on CreateSessionMessage");
return;
}
if (message.getSessionConfig().verifySignature()) {
_log.debug("Signature verified correctly on create session message");
} else {
_log.error("Signature verification *FAILED* on a create session message. Hijack attempt?");
_runner.disconnectClient("Invalid signature on CreateSessionMessage");
return;
}
SessionStatusMessage msg = new SessionStatusMessage();
SessionId sessionId = new SessionId();
sessionId.setSessionId(getNextSessionId());
_runner.setSessionId(sessionId);
msg.setSessionId(sessionId);
msg.setStatus(SessionStatusMessage.STATUS_CREATED);
try {
_runner.doSend(msg);
_runner.sessionEstablished(message.getSessionConfig());
} catch (I2CPMessageException ime) {
_log.error("Error writing out the session status message", ime);
} catch (IOException ioe) {
_log.error("Error writing out the session status message", ioe);
}
JobQueue.getInstance().addJob(new CreateSessionJob(_runner));
SessionStatusMessage msg = new SessionStatusMessage();
SessionId sessionId = new SessionId();
sessionId.setSessionId(getNextSessionId());
_runner.setSessionId(sessionId);
msg.setSessionId(sessionId);
msg.setStatus(SessionStatusMessage.STATUS_CREATED);
try {
_runner.doSend(msg);
_runner.sessionEstablished(message.getSessionConfig());
} catch (I2CPMessageException ime) {
_log.error("Error writing out the session status message", ime);
} catch (IOException ioe) {
_log.error("Error writing out the session status message", ioe);
}
_context.jobQueue().addJob(new CreateSessionJob(_context, _runner));
}
@ -150,9 +155,9 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
*
*/
private void handleSendMessage(I2CPMessageReader reader, SendMessageMessage message) {
_log.debug("handleSendMessage called");
MessageId id = _runner.distributeMessage(message);
_runner.ackSendMessage(id, message.getNonce());
_log.debug("handleSendMessage called");
MessageId id = _runner.distributeMessage(message);
_runner.ackSendMessage(id, message.getNonce());
}
@ -161,24 +166,25 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
*
*/
private void handleReceiveBegin(I2CPMessageReader reader, ReceiveMessageBeginMessage message) {
if (_runner.isDead()) return;
_log.debug("Handling recieve begin: id = " + message.getMessageId());
MessagePayloadMessage msg = new MessagePayloadMessage();
msg.setMessageId(message.getMessageId());
msg.setSessionId(_runner.getSessionId());
Payload payload = _runner.getPayload(message.getMessageId());
if (payload == null) {
_log.error("Payload for message id [" + message.getMessageId() + "] is null! Unknown message id?");
return;
}
msg.setPayload(payload);
try {
_runner.doSend(msg);
} catch (IOException ioe) {
_log.error("Error delivering the payload", ioe);
} catch (I2CPMessageException ime) {
_log.error("Error delivering the payload", ime);
}
if (_runner.isDead()) return;
_log.debug("Handling recieve begin: id = " + message.getMessageId());
MessagePayloadMessage msg = new MessagePayloadMessage();
msg.setMessageId(message.getMessageId());
msg.setSessionId(_runner.getSessionId());
Payload payload = _runner.getPayload(message.getMessageId());
if (payload == null) {
_log.error("Payload for message id [" + message.getMessageId()
+ "] is null! Unknown message id?");
return;
}
msg.setPayload(payload);
try {
_runner.doSend(msg);
} catch (IOException ioe) {
_log.error("Error delivering the payload", ioe);
} catch (I2CPMessageException ime) {
_log.error("Error delivering the payload", ime);
}
}
/**
@ -188,26 +194,26 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
*
*/
private void handleReceiveEnd(I2CPMessageReader reader, ReceiveMessageEndMessage message) {
_runner.removePayload(message.getMessageId());
_runner.removePayload(message.getMessageId());
}
private void handleDestroySession(I2CPMessageReader reader, DestroySessionMessage message) {
_log.info("Destroying client session " + _runner.getSessionId());
_runner.stopRunning();
_log.info("Destroying client session " + _runner.getSessionId());
_runner.stopRunning();
}
private void handleCreateLeaseSet(I2CPMessageReader reader, CreateLeaseSetMessage message) {
if ( (message.getLeaseSet() == null) || (message.getPrivateKey() == null) || (message.getSigningPrivateKey() == null) ) {
_log.error("Null lease set granted: " + message);
return;
}
_log.info("New lease set granted for destination " + message.getLeaseSet().getDestination().calculateHash().toBase64());
KeyManager.getInstance().registerKeys(message.getLeaseSet().getDestination(), message.getSigningPrivateKey(), message.getPrivateKey());
NetworkDatabaseFacade.getInstance().publish(message.getLeaseSet());
// leaseSetCreated takes care of all the LeaseRequestState stuff (including firing any jobs)
_runner.leaseSetCreated(message.getLeaseSet());
if ( (message.getLeaseSet() == null) || (message.getPrivateKey() == null) || (message.getSigningPrivateKey() == null) ) {
_log.error("Null lease set granted: " + message);
return;
}
_log.info("New lease set granted for destination " + message.getLeaseSet().getDestination().calculateHash().toBase64());
_context.keyManager().registerKeys(message.getLeaseSet().getDestination(), message.getSigningPrivateKey(), message.getPrivateKey());
_context.netDb().publish(message.getLeaseSet());
// leaseSetCreated takes care of all the LeaseRequestState stuff (including firing any jobs)
_runner.leaseSetCreated(message.getLeaseSet());
}
// this *should* be mod 65536, but UnsignedInteger is still b0rked. FIXME
@ -218,11 +224,11 @@ class ClientMessageEventListener implements I2CPMessageReader.I2CPMessageEventLi
/** generate a new sessionId */
private final static int getNextSessionId() {
synchronized (_sessionIdLock) {
int id = (++_id)%MAX_SESSION_ID;
if (_id >= MAX_SESSION_ID)
_id = 0;
return id;
}
synchronized (_sessionIdLock) {
int id = (++_id)%MAX_SESSION_ID;
if (_id >= MAX_SESSION_ID)
_id = 0;
return id;
}
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.client;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -15,6 +15,7 @@ import net.i2p.router.ClientTunnelSettings;
import net.i2p.router.JobImpl;
import net.i2p.router.TunnelManagerFacade;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Given an established connection, walk through the process of establishing the
@ -24,39 +25,41 @@ import net.i2p.util.Log;
*
*/
class CreateSessionJob extends JobImpl {
private final static Log _log = new Log(CreateSessionJob.class);
private Log _log;
private ClientConnectionRunner _runner;
private final static long LEASE_CREATION_TIMEOUT = 30*1000;
public CreateSessionJob(ClientConnectionRunner runner) {
_runner = runner;
public CreateSessionJob(RouterContext context, ClientConnectionRunner runner) {
super(context);
_log = context.logManager().getLog(CreateSessionJob.class);
_runner = runner;
}
public String getName() { return "Request tunnels for a new client"; }
public void runJob() {
SessionConfig cfg = _runner.getConfig();
if ( (cfg == null) || (cfg.getDestination() == null) ) return;
if (_log.shouldLog(Log.INFO))
_log.info("Requesting lease set for destination " + cfg.getDestination().calculateHash().toBase64());
ClientTunnelSettings settings = new ClientTunnelSettings();
Properties props = new Properties();
// We're NOT going to force all clients to use the router's defaults, since that may be
// excessive. This means that unless the user says otherwise, we'll be satisfied with whatever
// is available. Otherwise, when the router starts up, if there aren't sufficient tunnels with the
// adequate number of hops, the user will have to wait. Once peer profiles are persistent, we can
// reenable this, since on startup we'll have a sufficient number of high enough ranked peers to
// tunnel through. (perhaps).
// XXX take the router's defaults
// XXX props.putAll(Router.getInstance().getConfigMap());
// override them by the client's settings
props.putAll(_runner.getConfig().getOptions());
// and load 'em up (using anything not yet set as the software defaults)
settings.readFromProperties(props);
TunnelManagerFacade.getInstance().createTunnels(_runner.getConfig().getDestination(), settings, LEASE_CREATION_TIMEOUT);
SessionConfig cfg = _runner.getConfig();
if ( (cfg == null) || (cfg.getDestination() == null) ) return;
if (_log.shouldLog(Log.INFO))
_log.info("Requesting lease set for destination " + cfg.getDestination().calculateHash().toBase64());
ClientTunnelSettings settings = new ClientTunnelSettings();
Properties props = new Properties();
// We're NOT going to force all clients to use the router's defaults, since that may be
// excessive. This means that unless the user says otherwise, we'll be satisfied with whatever
// is available. Otherwise, when the router starts up, if there aren't sufficient tunnels with the
// adequate number of hops, the user will have to wait. Once peer profiles are persistent, we can
// reenable this, since on startup we'll have a sufficient number of high enough ranked peers to
// tunnel through. (perhaps).
// XXX take the router's defaults
// XXX props.putAll(Router.getInstance().getConfigMap());
// override them by the client's settings
props.putAll(_runner.getConfig().getOptions());
// and load 'em up (using anything not yet set as the software defaults)
settings.readFromProperties(props);
_context.tunnelManager().createTunnels(_runner.getConfig().getDestination(), settings, LEASE_CREATION_TIMEOUT);
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.client;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -16,6 +16,7 @@ import net.i2p.data.i2cp.I2CPMessageException;
import net.i2p.data.i2cp.MessageId;
import net.i2p.data.i2cp.MessageStatusMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
/**
@ -23,27 +24,29 @@ import net.i2p.util.Log;
*
*/
class MessageReceivedJob extends JobImpl {
private final static Log _log = new Log(MessageReceivedJob.class);
private Log _log;
private ClientConnectionRunner _runner;
private Destination _to;
private Destination _from;
private Payload _payload;
public MessageReceivedJob(ClientConnectionRunner runner, Destination toDest, Destination fromDest, Payload payload) {
_runner = runner;
_to = toDest;
_from = fromDest;
_payload = payload;
public MessageReceivedJob(RouterContext ctx, ClientConnectionRunner runner, Destination toDest, Destination fromDest, Payload payload) {
super(ctx);
_log = ctx.logManager().getLog(MessageReceivedJob.class);
_runner = runner;
_to = toDest;
_from = fromDest;
_payload = payload;
}
public String getName() { return "Deliver New Message"; }
public void runJob() {
if (_runner.isDead()) return;
MessageId id = new MessageId();
id.setMessageId(ClientConnectionRunner.getNextMessageId());
_runner.setPayload(id, _payload);
messageAvailable(id, _payload.getSize());
if (_runner.isDead()) return;
MessageId id = new MessageId();
id.setMessageId(ClientConnectionRunner.getNextMessageId());
_runner.setPayload(id, _payload);
messageAvailable(id, _payload.getSize());
}
/**
* Deliver notification to the client that the given message is available.
* This is synchronous and returns true if the notification was sent safely,
@ -51,19 +54,19 @@ class MessageReceivedJob extends JobImpl {
*
*/
public void messageAvailable(MessageId id, long size) {
_log.debug("Sending message available: " + id + " to sessionId " + _runner.getSessionId() + " (with nonce=1)", new Exception("available"));
MessageStatusMessage msg = new MessageStatusMessage();
msg.setMessageId(id);
msg.setSessionId(_runner.getSessionId());
msg.setSize(size);
msg.setNonce(1);
msg.setStatus(MessageStatusMessage.STATUS_AVAILABLE);
try {
_runner.doSend(msg);
} catch (I2CPMessageException ime) {
_log.error("Error writing out the message status message", ime);
} catch (IOException ioe) {
_log.error("Error writing out the message status message", ioe);
}
_log.debug("Sending message available: " + id + " to sessionId " + _runner.getSessionId() + " (with nonce=1)", new Exception("available"));
MessageStatusMessage msg = new MessageStatusMessage();
msg.setMessageId(id);
msg.setSessionId(_runner.getSessionId());
msg.setSize(size);
msg.setNonce(1);
msg.setStatus(MessageStatusMessage.STATUS_AVAILABLE);
try {
_runner.doSend(msg);
} catch (I2CPMessageException ime) {
_log.error("Error writing out the message status message", ime);
} catch (IOException ioe) {
_log.error("Error writing out the message status message", ioe);
}
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.client;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -16,40 +16,43 @@ import net.i2p.data.i2cp.I2CPMessageException;
import net.i2p.data.i2cp.ReportAbuseMessage;
import net.i2p.router.JobImpl;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Async job to send an abuse message to the client
*
*/
class ReportAbuseJob extends JobImpl {
private final static Log _log = new Log(ReportAbuseJob.class);
private Log _log;
private ClientConnectionRunner _runner;
private String _reason;
private int _severity;
public ReportAbuseJob(ClientConnectionRunner runner, String reason, int severity) {
_runner = runner;
_reason = reason;
_severity = severity;
public ReportAbuseJob(RouterContext context, ClientConnectionRunner runner, String reason, int severity) {
super(context);
_log = context.logManager().getLog(ReportAbuseJob.class);
_runner = runner;
_reason = reason;
_severity = severity;
}
public String getName() { return "Report Abuse"; }
public void runJob() {
if (_runner.isDead()) return;
AbuseReason res = new AbuseReason();
res.setReason(_reason);
AbuseSeverity sev = new AbuseSeverity();
sev.setSeverity(_severity);
ReportAbuseMessage msg = new ReportAbuseMessage();
msg.setMessageId(null);
msg.setReason(res);
msg.setSessionId(_runner.getSessionId());
msg.setSeverity(sev);
try {
_runner.doSend(msg);
} catch (I2CPMessageException ime) {
_log.error("Error reporting abuse", ime);
} catch (IOException ioe) {
_log.error("Error reporting abuse", ioe);
}
if (_runner.isDead()) return;
AbuseReason res = new AbuseReason();
res.setReason(_reason);
AbuseSeverity sev = new AbuseSeverity();
sev.setSeverity(_severity);
ReportAbuseMessage msg = new ReportAbuseMessage();
msg.setMessageId(null);
msg.setReason(res);
msg.setSessionId(_runner.getSessionId());
msg.setSeverity(sev);
try {
_runner.doSend(msg);
} catch (I2CPMessageException ime) {
_log.error("Error reporting abuse", ime);
} catch (IOException ioe) {
_log.error("Error reporting abuse", ioe);
}
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.client;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -19,110 +19,114 @@ import net.i2p.router.JobImpl;
import net.i2p.router.JobQueue;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Async job to walk the client through generating a lease set. First sends it
* to the client and then queues up a CheckLeaseRequestStatus job for
* to the client and then queues up a CheckLeaseRequestStatus job for
* processing after the expiration. When that CheckLeaseRequestStatus is run,
* if the client still hasn't provided the signed leaseSet, fire off the onFailed
* job from the intermediary LeaseRequestState and drop the client.
*
*/
class RequestLeaseSetJob extends JobImpl {
private static final Log _log = new Log(RequestLeaseSetJob.class);
private Log _log;
private ClientConnectionRunner _runner;
private LeaseSet _ls;
private long _expiration;
private Job _onCreate;
private Job _onFail;
public RequestLeaseSetJob(ClientConnectionRunner runner, LeaseSet set, long expiration, Job onCreate, Job onFail) {
_runner = runner;
_ls = set;
_expiration = expiration;
_onCreate = onCreate;
_onFail = onFail;
public RequestLeaseSetJob(RouterContext ctx, ClientConnectionRunner runner, LeaseSet set, long expiration, Job onCreate, Job onFail) {
super(ctx);
_log = ctx.logManager().getLog(RequestLeaseSetJob.class);
_runner = runner;
_ls = set;
_expiration = expiration;
_onCreate = onCreate;
_onFail = onFail;
}
public String getName() { return "Request Lease Set"; }
public void runJob() {
if (_runner.isDead()) return;
LeaseRequestState oldReq = _runner.getLeaseRequest();
if (oldReq != null) {
if (oldReq.getExpiration() > Clock.getInstance().now()) {
_log.error("Old *current* leaseRequest already exists! Why are we trying to request too quickly?", getAddedBy());
return;
} else {
_log.error("Old *expired* leaseRequest exists! Why did the old request not get killed? (expiration = " + new Date(oldReq.getExpiration()) + ")", getAddedBy());
}
}
LeaseRequestState state = new LeaseRequestState(_onCreate, _onFail, _expiration, _ls);
RequestLeaseSetMessage msg = new RequestLeaseSetMessage();
Date end = null;
// get the earliest end date
for (int i = 0; i < state.getRequested().getLeaseCount(); i++) {
if ( (end == null) || (end.getTime() > state.getRequested().getLease(i).getEndDate().getTime()) )
end = state.getRequested().getLease(i).getEndDate();
}
msg.setEndDate(end);
msg.setSessionId(_runner.getSessionId());
for (int i = 0; i < state.getRequested().getLeaseCount(); i++) {
msg.addEndpoint(state.getRequested().getLease(i).getRouterIdentity(), state.getRequested().getLease(i).getTunnelId());
}
try {
_runner.setLeaseRequest(state);
_runner.doSend(msg);
JobQueue.getInstance().addJob(new CheckLeaseRequestStatus(state));
return;
} catch (I2CPMessageException ime) {
_log.error("Error sending I2CP message requesting the lease set", ime);
state.setIsSuccessful(false);
_runner.setLeaseRequest(null);
_runner.disconnectClient("I2CP error requesting leaseSet");
return;
} catch (IOException ioe) {
_log.error("Error sending I2CP message requesting the lease set", ioe);
state.setIsSuccessful(false);
_runner.setLeaseRequest(null);
_runner.disconnectClient("IO error requesting leaseSet");
return;
}
if (_runner.isDead()) return;
LeaseRequestState oldReq = _runner.getLeaseRequest();
if (oldReq != null) {
if (oldReq.getExpiration() > _context.clock().now()) {
_log.error("Old *current* leaseRequest already exists! Why are we trying to request too quickly?", getAddedBy());
return;
} else {
_log.error("Old *expired* leaseRequest exists! Why did the old request not get killed? (expiration = " + new Date(oldReq.getExpiration()) + ")", getAddedBy());
}
}
LeaseRequestState state = new LeaseRequestState(_onCreate, _onFail, _expiration, _ls);
RequestLeaseSetMessage msg = new RequestLeaseSetMessage();
Date end = null;
// get the earliest end date
for (int i = 0; i < state.getRequested().getLeaseCount(); i++) {
if ( (end == null) || (end.getTime() > state.getRequested().getLease(i).getEndDate().getTime()) )
end = state.getRequested().getLease(i).getEndDate();
}
msg.setEndDate(end);
msg.setSessionId(_runner.getSessionId());
for (int i = 0; i < state.getRequested().getLeaseCount(); i++) {
msg.addEndpoint(state.getRequested().getLease(i).getRouterIdentity(), state.getRequested().getLease(i).getTunnelId());
}
try {
_runner.setLeaseRequest(state);
_runner.doSend(msg);
_context.jobQueue().addJob(new CheckLeaseRequestStatus(state));
return;
} catch (I2CPMessageException ime) {
_log.error("Error sending I2CP message requesting the lease set", ime);
state.setIsSuccessful(false);
_runner.setLeaseRequest(null);
_runner.disconnectClient("I2CP error requesting leaseSet");
return;
} catch (IOException ioe) {
_log.error("Error sending I2CP message requesting the lease set", ioe);
state.setIsSuccessful(false);
_runner.setLeaseRequest(null);
_runner.disconnectClient("IO error requesting leaseSet");
return;
}
}
/**
* Schedule this job to be run after the request's expiration, so that if
* Schedule this job to be run after the request's expiration, so that if
* it wasn't yet successful, we fire off the failure job and disconnect the
* client (but if it was, noop)
*
*/
private class CheckLeaseRequestStatus extends JobImpl {
private LeaseRequestState _req;
public CheckLeaseRequestStatus(LeaseRequestState state) {
_req = state;
getTiming().setStartAfter(state.getExpiration());
}
public void runJob() {
if (_runner.isDead()) return;
if (_req.getIsSuccessful()) {
// we didn't fail
return;
} else {
_log.error("Failed to receive a leaseSet in the time allotted (" + new Date(_req.getExpiration()) + ")");
_runner.disconnectClient("Took too long to request leaseSet");
if (_req.getOnFailed() != null)
JobQueue.getInstance().addJob(_req.getOnFailed());
// only zero out the request if its the one we know about
if (_req == _runner.getLeaseRequest())
_runner.setLeaseRequest(null);
}
}
public String getName() { return "Check LeaseRequest Status"; }
private LeaseRequestState _req;
public CheckLeaseRequestStatus(LeaseRequestState state) {
super(RequestLeaseSetJob.this._context);
_req = state;
getTiming().setStartAfter(state.getExpiration());
}
public void runJob() {
if (_runner.isDead()) return;
if (_req.getIsSuccessful()) {
// we didn't fail
return;
} else {
_log.error("Failed to receive a leaseSet in the time allotted (" + new Date(_req.getExpiration()) + ")");
_runner.disconnectClient("Took too long to request leaseSet");
if (_req.getOnFailed() != null)
RequestLeaseSetJob.this._context.jobQueue().addJob(_req.getOnFailed());
// only zero out the request if its the one we know about
if (_req == _runner.getLeaseRequest())
_runner.setLeaseRequest(null);
}
}
public String getName() { return "Check LeaseRequest Status"; }
}
}

View File

@ -1,67 +0,0 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.TunnelInfo;
import net.i2p.util.Log;
/**
* Build a TunnelCreateMessage that is sent to the target requesting that they
* participate in the tunnel. If they reply back saying they will, fire off the
* onCreateSuccessful job, otherwise fire off the onCreateFailed job after a timeout.
* The test message is sent at the specified priority.
*
* The message algorithm is:
* = check to see if we have working outbound tunnels
* - if true, send a tunnel message out the tunnel containing a garlic aimed directly at the peer in question.
* - if false, send a message garlic'ed through a few routers before reaching the peer in question.
*
* the source route block will always point at an inbound tunnel - even if there aren't any real ones (in
* which case, the tunnel gateway is the local router)
*
*/
class BuildCreateTunnelMessageJob extends JobImpl {
private final static Log _log = new Log(BuildCreateTunnelMessageJob.class);
private RouterInfo _target;
private Hash _replyTo;
private TunnelInfo _tunnelConfig;
private Job _onCreateSuccessful;
private Job _onCreateFailed;
private long _timeoutMs;
private int _priority;
/**
*
* @param target router to participate in the tunnel
* @param replyTo our address
* @param info data regarding the tunnel configuration
* @param onCreateSuccessfulJob after the peer replies back saying they'll participate
* @param onCreateFailedJob after the peer replies back saying they won't participate, or timeout
* @param timeoutMs how long to wait before timing out
* @param priority how high priority to send this test
*/
public BuildCreateTunnelMessageJob(RouterInfo target, Hash replyTo, TunnelInfo info, Job onCreateSuccessfulJob, Job onCreateFailedJob, long timeoutMs, int priority) {
super();
_target = target;
_replyTo = replyTo;
_tunnelConfig = info;
_onCreateSuccessful = onCreateSuccessfulJob;
_onCreateFailed = onCreateFailedJob;
_timeoutMs = timeoutMs;
_priority = priority;
}
public String getName() { return "Build Create Tunnel Message"; }
public void runJob() {}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -30,16 +30,17 @@ import net.i2p.router.Router;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.router.RouterContext;
/**
* Build a test message that will be sent to the target to make sure they're alive.
* Once that is verified, onSendJob is enqueued. If their reachability isn't
* Once that is verified, onSendJob is enqueued. If their reachability isn't
* known (or they're unreachable) within timeoutMs, onSendFailedJob is enqueued.
* The test message is sent at the specified priority.
*
*/
public class BuildTestMessageJob extends JobImpl {
private final static Log _log = new Log(BuildTestMessageJob.class);
private Log _log;
private RouterInfo _target;
private Hash _replyTo;
private Job _onSend;
@ -47,7 +48,7 @@ public class BuildTestMessageJob extends JobImpl {
private long _timeoutMs;
private int _priority;
private long _testMessageKey;
/**
*
* @param target router being tested
@ -56,144 +57,149 @@ public class BuildTestMessageJob extends JobImpl {
* @param timeoutMs how long to wait before timing out
* @param priority how high priority to send this test
*/
public BuildTestMessageJob(RouterInfo target, Hash replyTo, Job onSendJob, Job onSendFailedJob, long timeoutMs, int priority) {
super();
_target = target;
_replyTo = replyTo;
_onSend = onSendJob;
_onSendFailed = onSendFailedJob;
_timeoutMs = timeoutMs;
_priority = priority;
_testMessageKey = -1;
public BuildTestMessageJob(RouterContext ctx, RouterInfo target, Hash replyTo,
Job onSendJob, Job onSendFailedJob, long timeoutMs, int priority) {
super(ctx);
_log = ctx.logManager().getLog(BuildTestMessageJob.class);
_target = target;
_replyTo = replyTo;
_onSend = onSendJob;
_onSendFailed = onSendFailedJob;
_timeoutMs = timeoutMs;
_priority = priority;
_testMessageKey = -1;
}
public String getName() { return "Build Test Message"; }
public void runJob() {
// This is a test message - build a garlic with a DeliveryStatusMessage that
// first goes to the peer then back to us.
if (_log.shouldLog(Log.DEBUG))
_log.debug("Building garlic message to test " + _target.getIdentity().getHash().toBase64());
GarlicConfig config = buildGarlicCloveConfig();
// TODO: make the last params on this specify the correct sessionKey and tags used
ReplyJob replyJob = new JobReplyJob(_onSend, config.getRecipient().getIdentity().getPublicKey(), config.getId(), null, new HashSet());
MessageSelector sel = buildMessageSelector();
SendGarlicJob job = new SendGarlicJob(config, null, _onSendFailed, replyJob, _onSendFailed, _timeoutMs, _priority, sel);
JobQueue.getInstance().addJob(job);
// This is a test message - build a garlic with a DeliveryStatusMessage that
// first goes to the peer then back to us.
if (_log.shouldLog(Log.DEBUG))
_log.debug("Building garlic message to test " + _target.getIdentity().getHash().toBase64());
GarlicConfig config = buildGarlicCloveConfig();
// TODO: make the last params on this specify the correct sessionKey and tags used
ReplyJob replyJob = new JobReplyJob(_context, _onSend, config.getRecipient().getIdentity().getPublicKey(), config.getId(), null, new HashSet());
MessageSelector sel = buildMessageSelector();
SendGarlicJob job = new SendGarlicJob(_context, config, null, _onSendFailed, replyJob, _onSendFailed, _timeoutMs, _priority, sel);
_context.jobQueue().addJob(job);
}
private MessageSelector buildMessageSelector() {
return new TestMessageSelector(_testMessageKey, _timeoutMs + Clock.getInstance().now());
return new TestMessageSelector(_testMessageKey, _timeoutMs + _context.clock().now());
}
private GarlicConfig buildGarlicCloveConfig() {
_testMessageKey = RandomSource.getInstance().nextInt(Integer.MAX_VALUE);
if (_log.shouldLog(Log.INFO))
_log.info("Test message key: " + _testMessageKey);
GarlicConfig config = new GarlicConfig();
PayloadGarlicConfig ackClove = buildAckClove();
config.addClove(ackClove);
DeliveryInstructions instructions = new DeliveryInstructions();
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER);
instructions.setDelayRequested(false);
instructions.setDelaySeconds(0);
instructions.setEncrypted(false);
instructions.setEncryptionKey(null);
instructions.setRouter(_target.getIdentity().getHash());
instructions.setTunnelId(null);
config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
config.setDeliveryInstructions(instructions);
config.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE));
config.setExpiration(_timeoutMs+Clock.getInstance().now()+2*Router.CLOCK_FUDGE_FACTOR);
config.setRecipient(_target);
config.setRequestAck(false);
return config;
_testMessageKey = _context.random().nextInt(Integer.MAX_VALUE);
if (_log.shouldLog(Log.INFO))
_log.info("Test message key: " + _testMessageKey);
GarlicConfig config = new GarlicConfig();
PayloadGarlicConfig ackClove = buildAckClove();
config.addClove(ackClove);
DeliveryInstructions instructions = new DeliveryInstructions();
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER);
instructions.setDelayRequested(false);
instructions.setDelaySeconds(0);
instructions.setEncrypted(false);
instructions.setEncryptionKey(null);
instructions.setRouter(_target.getIdentity().getHash());
instructions.setTunnelId(null);
config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
config.setDeliveryInstructions(instructions);
config.setId(_context.random().nextInt(Integer.MAX_VALUE));
config.setExpiration(_timeoutMs+_context.clock().now()+2*Router.CLOCK_FUDGE_FACTOR);
config.setRecipient(_target);
config.setRequestAck(false);
return config;
}
/**
* Build a clove that sends a DeliveryStatusMessage to us
*/
private PayloadGarlicConfig buildAckClove() {
PayloadGarlicConfig ackClove = new PayloadGarlicConfig();
DeliveryInstructions ackInstructions = new DeliveryInstructions();
ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER);
ackInstructions.setRouter(_replyTo); // yikes!
ackInstructions.setDelayRequested(false);
ackInstructions.setDelaySeconds(0);
ackInstructions.setEncrypted(false);
DeliveryStatusMessage msg = new DeliveryStatusMessage();
msg.setArrival(new Date(Clock.getInstance().now()));
msg.setMessageId(_testMessageKey);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Delivery status message key: " + _testMessageKey + " arrival: " + msg.getArrival());
ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
ackClove.setDeliveryInstructions(ackInstructions);
ackClove.setExpiration(_timeoutMs+Clock.getInstance().now());
ackClove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE));
ackClove.setPayload(msg);
ackClove.setRecipient(_target);
ackClove.setRequestAck(false);
return ackClove;
PayloadGarlicConfig ackClove = new PayloadGarlicConfig();
DeliveryInstructions ackInstructions = new DeliveryInstructions();
ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER);
ackInstructions.setRouter(_replyTo); // yikes!
ackInstructions.setDelayRequested(false);
ackInstructions.setDelaySeconds(0);
ackInstructions.setEncrypted(false);
DeliveryStatusMessage msg = new DeliveryStatusMessage(_context);
msg.setArrival(new Date(_context.clock().now()));
msg.setMessageId(_testMessageKey);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Delivery status message key: " + _testMessageKey + " arrival: " + msg.getArrival());
ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
ackClove.setDeliveryInstructions(ackInstructions);
ackClove.setExpiration(_timeoutMs+_context.clock().now());
ackClove.setId(_context.random().nextInt(Integer.MAX_VALUE));
ackClove.setPayload(msg);
ackClove.setRecipient(_target);
ackClove.setRequestAck(false);
return ackClove;
}
/**
* Search inbound messages for delivery status messages with our key
*/
private final static class TestMessageSelector implements MessageSelector {
private long _testMessageKey;
private long _timeout;
public TestMessageSelector(long key, long timeout) {
_testMessageKey = key;
_timeout = timeout;
}
public boolean continueMatching() { return false; }
public long getExpiration() { return _timeout; }
public boolean isMatch(I2NPMessage inMsg) {
if (inMsg.getType() == DeliveryStatusMessage.MESSAGE_TYPE) {
return ((DeliveryStatusMessage)inMsg).getMessageId() == _testMessageKey;
} else {
return false;
}
}
private long _testMessageKey;
private long _timeout;
public TestMessageSelector(long key, long timeout) {
_testMessageKey = key;
_timeout = timeout;
}
public boolean continueMatching() { return false; }
public long getExpiration() { return _timeout; }
public boolean isMatch(I2NPMessage inMsg) {
if (inMsg.getType() == DeliveryStatusMessage.MESSAGE_TYPE) {
return ((DeliveryStatusMessage)inMsg).getMessageId() == _testMessageKey;
} else {
return false;
}
}
}
/**
* On reply, fire off the specified job
*
*/
private final static class JobReplyJob extends JobImpl implements ReplyJob {
private Job _job;
private PublicKey _target;
private long _msgId;
private Set _sessionTagsDelivered;
private SessionKey _keyDelivered;
public JobReplyJob(Job job, PublicKey target, long msgId, SessionKey keyUsed, Set tagsDelivered) {
_job = job;
_target = target;
_msgId = msgId;
_keyDelivered = keyUsed;
_sessionTagsDelivered = tagsDelivered;
}
public String getName() { return "Reply To Test Message Received"; }
public void runJob() {
if ( (_keyDelivered != null) && (_sessionTagsDelivered != null) && (_sessionTagsDelivered.size() > 0) )
SessionKeyManager.getInstance().tagsDelivered(_target, _keyDelivered, _sessionTagsDelivered);
JobQueue.getInstance().addJob(_job);
}
public void setMessage(I2NPMessage message) {
// ignored, this is just a ping
}
private static final class JobReplyJob extends JobImpl implements ReplyJob {
private Job _job;
private PublicKey _target;
private long _msgId;
private Set _sessionTagsDelivered;
private SessionKey _keyDelivered;
public JobReplyJob(RouterContext ctx, Job job, PublicKey target, long msgId, SessionKey keyUsed, Set tagsDelivered) {
super(ctx);
_job = job;
_target = target;
_msgId = msgId;
_keyDelivered = keyUsed;
_sessionTagsDelivered = tagsDelivered;
}
public String getName() { return "Reply To Test Message Received"; }
public void runJob() {
if ( (_keyDelivered != null) &&
(_sessionTagsDelivered != null) &&
(_sessionTagsDelivered.size() > 0) )
_context.sessionKeyManager().tagsDelivered(_target, _keyDelivered, _sessionTagsDelivered);
_context.jobQueue().addJob(_job);
}
public void setMessage(I2NPMessage message) {
// ignored, this is just a ping
}
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -28,179 +28,181 @@ import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.SourceRouteBlock;
import net.i2p.router.MessageHistory;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Build garlic messages based on a GarlicConfig
*
*/
public class GarlicMessageBuilder {
private final static Log _log = new Log(GarlicMessageBuilder.class);
public static GarlicMessage buildMessage(GarlicConfig config) {
return buildMessage(config, new SessionKey(), new HashSet());
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config) {
return buildMessage(ctx, config, new SessionKey(), new HashSet());
}
public static GarlicMessage buildMessage(GarlicConfig config, SessionKey wrappedKey, Set wrappedTags) {
if (config == null)
throw new IllegalArgumentException("Null config specified");
PublicKey key = config.getRecipientPublicKey();
if (key == null) {
if (config.getRecipient() == null) {
throw new IllegalArgumentException("Null recipient specified");
} else if (config.getRecipient().getIdentity() == null) {
throw new IllegalArgumentException("Null recipient.identity specified");
} else if (config.getRecipient().getIdentity().getPublicKey() == null) {
throw new IllegalArgumentException("Null recipient.identity.publicKey specified");
} else
key = config.getRecipient().getIdentity().getPublicKey();
}
GarlicMessage msg = new GarlicMessage();
noteWrap(msg, config);
_log.info("Encrypted with public key " + key + " to expire on " + new Date(config.getExpiration()));
byte cloveSet[] = buildCloveSet(config);
SessionKey curKey = SessionKeyManager.getInstance().getCurrentKey(key);
if (curKey == null)
curKey = SessionKeyManager.getInstance().createSession(key);
wrappedKey.setData(curKey.getData());
int availTags = SessionKeyManager.getInstance().getAvailableTags(key, curKey);
_log.debug("Available tags for encryption to " + key + ": " + availTags);
if (availTags < 10) { // arbitrary threshold
for (int i = 0; i < 20; i++)
wrappedTags.add(new SessionTag(true));
_log.info("Less than 10 tags are available (" + availTags + "), so we're including 20 more");
} else if (SessionKeyManager.getInstance().getAvailableTimeLeft(key, curKey) < 30*1000) {
// if we have > 10 tags, but they expire in under 30 seconds, we want more
for (int i = 0; i < 20; i++)
wrappedTags.add(new SessionTag(true));
_log.info("Tags are almost expired, adding 20 new ones");
} else {
// always tack on at least one more - not necessary.
//wrappedTags.add(new SessionTag(true));
}
SessionTag curTag = SessionKeyManager.getInstance().consumeNextAvailableTag(key, curKey);
byte encData[] = ElGamalAESEngine.encrypt(cloveSet, key, curKey, wrappedTags, curTag, 1024);
msg.setData(encData);
Date exp = new Date(config.getExpiration());
msg.setMessageExpiration(exp);
return msg;
public static GarlicMessage buildMessage(RouterContext ctx, GarlicConfig config, SessionKey wrappedKey, Set wrappedTags) {
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
if (config == null)
throw new IllegalArgumentException("Null config specified");
PublicKey key = config.getRecipientPublicKey();
if (key == null) {
if (config.getRecipient() == null) {
throw new IllegalArgumentException("Null recipient specified");
} else if (config.getRecipient().getIdentity() == null) {
throw new IllegalArgumentException("Null recipient.identity specified");
} else if (config.getRecipient().getIdentity().getPublicKey() == null) {
throw new IllegalArgumentException("Null recipient.identity.publicKey specified");
} else
key = config.getRecipient().getIdentity().getPublicKey();
}
GarlicMessage msg = new GarlicMessage(ctx);
noteWrap(ctx, msg, config);
log.info("Encrypted with public key " + key + " to expire on " + new Date(config.getExpiration()));
byte cloveSet[] = buildCloveSet(ctx, config);
SessionKey curKey = ctx.sessionKeyManager().getCurrentKey(key);
if (curKey == null)
curKey = ctx.sessionKeyManager().createSession(key);
wrappedKey.setData(curKey.getData());
int availTags = ctx.sessionKeyManager().getAvailableTags(key, curKey);
log.debug("Available tags for encryption to " + key + ": " + availTags);
if (availTags < 10) { // arbitrary threshold
for (int i = 0; i < 20; i++)
wrappedTags.add(new SessionTag(true));
log.info("Less than 10 tags are available (" + availTags + "), so we're including 20 more");
} else if (ctx.sessionKeyManager().getAvailableTimeLeft(key, curKey) < 30*1000) {
// if we have > 10 tags, but they expire in under 30 seconds, we want more
for (int i = 0; i < 20; i++)
wrappedTags.add(new SessionTag(true));
log.info("Tags are almost expired, adding 20 new ones");
} else {
// always tack on at least one more - not necessary.
//wrappedTags.add(new SessionTag(true));
}
SessionTag curTag = ctx.sessionKeyManager().consumeNextAvailableTag(key, curKey);
byte encData[] = ctx.elGamalAESEngine().encrypt(cloveSet, key, curKey, wrappedTags, curTag, 1024);
msg.setData(encData);
Date exp = new Date(config.getExpiration());
msg.setMessageExpiration(exp);
return msg;
}
private static void noteWrap(GarlicMessage wrapper, GarlicConfig contained) {
for (int i = 0; i < contained.getCloveCount(); i++) {
GarlicConfig config = contained.getClove(i);
if (config instanceof PayloadGarlicConfig) {
I2NPMessage msg = ((PayloadGarlicConfig)config).getPayload();
String bodyType = msg.getClass().getName();
MessageHistory.getInstance().wrap(bodyType, msg.getUniqueId(), GarlicMessage.class.getName(), wrapper.getUniqueId());
}
}
private static void noteWrap(RouterContext ctx, GarlicMessage wrapper, GarlicConfig contained) {
for (int i = 0; i < contained.getCloveCount(); i++) {
GarlicConfig config = contained.getClove(i);
if (config instanceof PayloadGarlicConfig) {
I2NPMessage msg = ((PayloadGarlicConfig)config).getPayload();
String bodyType = msg.getClass().getName();
ctx.messageHistory().wrap(bodyType, msg.getUniqueId(), GarlicMessage.class.getName(), wrapper.getUniqueId());
}
}
}
/**
* Build an unencrypted set of cloves specified by the config.
* Build an unencrypted set of cloves specified by the config.
*
*/
private static byte[] buildCloveSet(GarlicConfig config) {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
try {
if (config instanceof PayloadGarlicConfig) {
DataHelper.writeLong(baos, 1, 1);
baos.write(buildClove((PayloadGarlicConfig)config));
} else {
DataHelper.writeLong(baos, 1, config.getCloveCount());
for (int i = 0; i < config.getCloveCount(); i++) {
GarlicConfig c = config.getClove(i);
byte clove[] = null;
if (c instanceof PayloadGarlicConfig) {
_log.debug("Subclove IS a payload garlic clove");
clove = buildClove((PayloadGarlicConfig)c);
} else {
_log.debug("Subclove IS NOT a payload garlic clove");
clove = buildClove(c);
}
if (clove == null)
throw new DataFormatException("Unable to build clove");
else
baos.write(clove);
}
}
config.getCertificate().writeBytes(baos);
DataHelper.writeLong(baos, 4, config.getId());
DataHelper.writeDate(baos, new Date(config.getExpiration()));
} catch (IOException ioe) {
_log.error("Error building the clove set", ioe);
} catch (DataFormatException dfe) {
_log.error("Error building the clove set", dfe);
}
return baos.toByteArray();
private static byte[] buildCloveSet(RouterContext ctx, GarlicConfig config) {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
try {
if (config instanceof PayloadGarlicConfig) {
DataHelper.writeLong(baos, 1, 1);
baos.write(buildClove(ctx, (PayloadGarlicConfig)config));
} else {
DataHelper.writeLong(baos, 1, config.getCloveCount());
for (int i = 0; i < config.getCloveCount(); i++) {
GarlicConfig c = config.getClove(i);
byte clove[] = null;
if (c instanceof PayloadGarlicConfig) {
log.debug("Subclove IS a payload garlic clove");
clove = buildClove(ctx, (PayloadGarlicConfig)c);
} else {
log.debug("Subclove IS NOT a payload garlic clove");
clove = buildClove(ctx, c);
}
if (clove == null)
throw new DataFormatException("Unable to build clove");
else
baos.write(clove);
}
}
config.getCertificate().writeBytes(baos);
DataHelper.writeLong(baos, 4, config.getId());
DataHelper.writeDate(baos, new Date(config.getExpiration()));
} catch (IOException ioe) {
log.error("Error building the clove set", ioe);
} catch (DataFormatException dfe) {
log.error("Error building the clove set", dfe);
}
return baos.toByteArray();
}
private static byte[] buildClove(PayloadGarlicConfig config) throws DataFormatException, IOException {
GarlicClove clove = new GarlicClove();
clove.setData(config.getPayload());
return buildCommonClove(clove, config);
private static byte[] buildClove(RouterContext ctx, PayloadGarlicConfig config) throws DataFormatException, IOException {
GarlicClove clove = new GarlicClove(ctx);
clove.setData(config.getPayload());
return buildCommonClove(ctx, clove, config);
}
private static byte[] buildClove(GarlicConfig config) throws DataFormatException, IOException {
GarlicClove clove = new GarlicClove();
GarlicMessage msg = buildMessage(config);
if (msg == null)
throw new DataFormatException("Unable to build message from clove config");
clove.setData(msg);
return buildCommonClove(clove, config);
private static byte[] buildClove(RouterContext ctx, GarlicConfig config) throws DataFormatException, IOException {
GarlicClove clove = new GarlicClove(ctx);
GarlicMessage msg = buildMessage(ctx, config);
if (msg == null)
throw new DataFormatException("Unable to build message from clove config");
clove.setData(msg);
return buildCommonClove(ctx, clove, config);
}
private static byte[] buildCommonClove(GarlicClove clove, GarlicConfig config) throws DataFormatException, IOException {
clove.setCertificate(config.getCertificate());
clove.setCloveId(config.getId());
clove.setExpiration(new Date(config.getExpiration()));
clove.setInstructions(config.getDeliveryInstructions());
specifySourceRouteBlock(clove, config);
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
clove.writeBytes(baos);
return baos.toByteArray();
private static byte[] buildCommonClove(RouterContext ctx, GarlicClove clove, GarlicConfig config) throws DataFormatException, IOException {
clove.setCertificate(config.getCertificate());
clove.setCloveId(config.getId());
clove.setExpiration(new Date(config.getExpiration()));
clove.setInstructions(config.getDeliveryInstructions());
specifySourceRouteBlock(ctx, clove, config);
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
clove.writeBytes(baos);
return baos.toByteArray();
}
private static void specifySourceRouteBlock(GarlicClove clove, GarlicConfig config) throws DataFormatException {
boolean includeBlock = false;
if (config.getRequestAck()) {
clove.setSourceRouteBlockAction(GarlicClove.ACTION_STATUS);
includeBlock = true;
} else if (config.getReplyInstructions() != null) {
clove.setSourceRouteBlockAction(GarlicClove.ACTION_MESSAGE_SPECIFIC);
includeBlock = true;
} else {
clove.setSourceRouteBlockAction(GarlicClove.ACTION_NONE);
}
if (includeBlock) {
_log.debug("Specifying source route block");
SessionKey replySessionKey = KeyGenerator.getInstance().generateSessionKey();
SessionTag tag = new SessionTag(true);
// make it so we'll read the session tag correctly and use the right session key
HashSet tags = new HashSet(1);
tags.add(tag);
SessionKeyManager.getInstance().tagsReceived(replySessionKey, tags);
SourceRouteBlock block = new SourceRouteBlock();
PublicKey pk = config.getReplyThroughRouter().getIdentity().getPublicKey();
block.setData(config.getReplyInstructions(), config.getReplyBlockMessageId(),
config.getReplyBlockCertificate(), config.getReplyBlockExpiration(), pk);
block.setRouter(config.getReplyThroughRouter().getIdentity().getHash());
block.setKey(replySessionKey);
block.setTag(tag);
clove.setSourceRouteBlock(block);
} else {
clove.setSourceRouteBlock(null);
}
private static void specifySourceRouteBlock(RouterContext ctx, GarlicClove clove, GarlicConfig config) throws DataFormatException {
Log log = ctx.logManager().getLog(GarlicMessageBuilder.class);
boolean includeBlock = false;
if (config.getRequestAck()) {
clove.setSourceRouteBlockAction(GarlicClove.ACTION_STATUS);
includeBlock = true;
} else if (config.getReplyInstructions() != null) {
clove.setSourceRouteBlockAction(GarlicClove.ACTION_MESSAGE_SPECIFIC);
includeBlock = true;
} else {
clove.setSourceRouteBlockAction(GarlicClove.ACTION_NONE);
}
if (includeBlock) {
log.debug("Specifying source route block");
SessionKey replySessionKey = ctx.keyGenerator().generateSessionKey();
SessionTag tag = new SessionTag(true);
// make it so we'll read the session tag correctly and use the right session key
HashSet tags = new HashSet(1);
tags.add(tag);
ctx.sessionKeyManager().tagsReceived(replySessionKey, tags);
SourceRouteBlock block = new SourceRouteBlock();
PublicKey pk = config.getReplyThroughRouter().getIdentity().getPublicKey();
block.setData(ctx, config.getReplyInstructions(), config.getReplyBlockMessageId(),
config.getReplyBlockCertificate(), config.getReplyBlockExpiration(), pk);
block.setRouter(config.getReplyThroughRouter().getIdentity().getHash());
block.setKey(replySessionKey);
block.setTag(tag);
clove.setSourceRouteBlock(block);
} else {
clove.setSourceRouteBlock(null);
}
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -15,17 +15,23 @@ import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.SourceRouteBlock;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;
/**
* HandlerJobBuilder to build jobs to handle GarlicMessages
*
*/
public class GarlicMessageHandler implements HandlerJobBuilder {
private RouterContext _context;
public GarlicMessageHandler(RouterContext context) {
_context = context;
}
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) {
// ignore the reply block for the moment
HandleGarlicMessageJob job = new HandleGarlicMessageJob((GarlicMessage)receivedMessage, from, fromHash);
return job;
// ignore the reply block for the moment
HandleGarlicMessageJob job = new HandleGarlicMessageJob(_context, (GarlicMessage)receivedMessage, from, fromHash);
return job;
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -22,71 +22,75 @@ import net.i2p.data.PrivateKey;
import net.i2p.data.i2np.GarlicClove;
import net.i2p.data.i2np.GarlicMessage;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Read a GarlicMessage, decrypt it, and return the resulting CloveSet
* Read a GarlicMessage, decrypt it, and return the resulting CloveSet
*
*/
public class GarlicMessageParser {
private final static Log _log = new Log(GarlicMessageParser.class);
private static GarlicMessageParser _instance = new GarlicMessageParser();
public static GarlicMessageParser getInstance() { return _instance; }
private GarlicMessageParser() {}
private Log _log;
private RouterContext _context;
public GarlicMessageParser(RouterContext context) {
_context = context;
_log = _context.logManager().getLog(GarlicMessageParser.class);
}
public CloveSet getGarlicCloves(GarlicMessage message, PrivateKey encryptionKey) {
byte encData[] = message.getData();
byte decrData[] = null;
try {
_log.debug("Decrypting with private key " + encryptionKey);
decrData = ElGamalAESEngine.decrypt(encData, encryptionKey);
} catch (DataFormatException dfe) {
_log.warn("Error decrypting", dfe);
}
if (decrData == null) {
_log.debug("Decryption of garlic message failed");
return null;
} else {
return readCloveSet(decrData);
}
byte encData[] = message.getData();
byte decrData[] = null;
try {
_log.debug("Decrypting with private key " + encryptionKey);
decrData = _context.elGamalAESEngine().decrypt(encData, encryptionKey);
} catch (DataFormatException dfe) {
_log.warn("Error decrypting", dfe);
}
if (decrData == null) {
_log.debug("Decryption of garlic message failed");
return null;
} else {
return readCloveSet(decrData);
}
}
private CloveSet readCloveSet(byte data[]) {
Set cloves = new HashSet();
ByteArrayInputStream bais = new ByteArrayInputStream(data);
try {
CloveSet set = new CloveSet();
int numCloves = (int)DataHelper.readLong(bais, 1);
_log.debug("# cloves to read: " + numCloves);
for (int i = 0; i < numCloves; i++) {
_log.debug("Reading clove " + i);
try {
GarlicClove clove = new GarlicClove();
clove.readBytes(bais);
set.addClove(clove);
} catch (DataFormatException dfe) {
_log.warn("Unable to read clove " + i, dfe);
} catch (IOException ioe) {
_log.warn("Unable to read clove " + i, ioe);
}
_log.debug("After reading clove " + i);
}
Certificate cert = new Certificate();
cert.readBytes(bais);
long msgId = DataHelper.readLong(bais, 4);
Date expiration = DataHelper.readDate(bais);
set.setCertificate(cert);
set.setMessageId(msgId);
set.setExpiration(expiration.getTime());
return set;
} catch (IOException ioe) {
_log.error("Error reading clove set", ioe);
return null;
} catch (DataFormatException dfe) {
_log.error("Error reading clove set", dfe);
return null;
}
Set cloves = new HashSet();
ByteArrayInputStream bais = new ByteArrayInputStream(data);
try {
CloveSet set = new CloveSet();
int numCloves = (int)DataHelper.readLong(bais, 1);
_log.debug("# cloves to read: " + numCloves);
for (int i = 0; i < numCloves; i++) {
_log.debug("Reading clove " + i);
try {
GarlicClove clove = new GarlicClove(_context);
clove.readBytes(bais);
set.addClove(clove);
} catch (DataFormatException dfe) {
_log.warn("Unable to read clove " + i, dfe);
} catch (IOException ioe) {
_log.warn("Unable to read clove " + i, ioe);
}
_log.debug("After reading clove " + i);
}
Certificate cert = new Certificate();
cert.readBytes(bais);
long msgId = DataHelper.readLong(bais, 4);
Date expiration = DataHelper.readDate(bais);
set.setCertificate(cert);
set.setMessageId(msgId);
set.setExpiration(expiration.getTime());
return set;
} catch (IOException ioe) {
_log.error("Error reading clove set", ioe);
return null;
} catch (DataFormatException dfe) {
_log.error("Error reading clove set", dfe);
return null;
}
}
}

View File

@ -28,6 +28,7 @@ import net.i2p.router.Router;
import net.i2p.stat.StatManager;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Unencrypt a garlic message and handle each of the cloves - locally destined
@ -37,40 +38,42 @@ import net.i2p.util.Log;
*
*/
public class HandleGarlicMessageJob extends JobImpl {
private final static Log _log = new Log(HandleGarlicMessageJob.class);
private Log _log;
private GarlicMessage _message;
private RouterIdentity _from;
private Hash _fromHash;
private static Map _cloves; // map of clove Id --> Expiration of cloves we've already seen
static {
StatManager.getInstance().createRateStat("crypto.garlic.decryptFail", "How often garlic messages are undecryptable", "Encryption", new long[] { 5*60*1000, 60*60*1000, 24*60*60*1000 });
}
private Map _cloves; // map of clove Id --> Expiration of cloves we've already seen
private MessageHandler _handler;
private GarlicMessageParser _parser;
private final static int FORWARD_PRIORITY = 50;
public HandleGarlicMessageJob(GarlicMessage msg, RouterIdentity from, Hash fromHash) {
super();
public HandleGarlicMessageJob(RouterContext context, GarlicMessage msg, RouterIdentity from, Hash fromHash) {
super(context);
_log = context.logManager().getLog(HandleGarlicMessageJob.class);
_context.statManager().createRateStat("crypto.garlic.decryptFail", "How often garlic messages are undecryptable", "Encryption", new long[] { 5*60*1000, 60*60*1000, 24*60*60*1000 });
if (_log.shouldLog(Log.DEBUG))
_log.debug("New handle garlicMessageJob called w/ message from [" + from + "]", new Exception("Debug"));
_message = msg;
_from = from;
_fromHash = fromHash;
_cloves = new HashMap();
_handler = new MessageHandler(context);
_parser = new GarlicMessageParser(context);
}
public String getName() { return "Handle Inbound Garlic Message"; }
public void runJob() {
CloveSet set = GarlicMessageParser.getInstance().getGarlicCloves(_message, KeyManager.getInstance().getPrivateKey());
CloveSet set = _parser.getGarlicCloves(_message, _context.keyManager().getPrivateKey());
if (set == null) {
Set keys = KeyManager.getInstance().getAllKeys();
Set keys = _context.keyManager().getAllKeys();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Decryption with the router's key failed, now try with the " + keys.size() + " leaseSet keys");
// our router key failed, which means that it was either encrypted wrong
// or it was encrypted to a LeaseSet's PublicKey
for (Iterator iter = keys.iterator(); iter.hasNext();) {
LeaseSetKeys lskeys = (LeaseSetKeys)iter.next();
set = GarlicMessageParser.getInstance().getGarlicCloves(_message, lskeys.getDecryptionKey());
set = _parser.getGarlicCloves(_message, lskeys.getDecryptionKey());
if (set != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Decrypted garlic message with lease set key for destination "
@ -96,14 +99,14 @@ public class HandleGarlicMessageJob extends JobImpl {
_log.error("CloveMessageParser failed to decrypt the message [" + _message.getUniqueId()
+ "] to us when received from [" + _fromHash + "] / [" + _from + "]",
new Exception("Decrypt garlic failed"));
StatManager.getInstance().addRateData("crypto.garlic.decryptFail", 1, 0);
MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(),
_context.statManager().addRateData("crypto.garlic.decryptFail", 1, 0);
_context.messageHistory().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Garlic could not be decrypted");
}
}
private static boolean isKnown(long cloveId) {
private boolean isKnown(long cloveId) {
boolean known = false;
synchronized (_cloves) {
known = _cloves.containsKey(new Long(cloveId));
@ -113,11 +116,11 @@ public class HandleGarlicMessageJob extends JobImpl {
return known;
}
private static void cleanupCloves() {
private void cleanupCloves() {
// this should be in its own thread perhaps? and maybe _cloves should be
// synced to disk?
List toRemove = new ArrayList(32);
long now = Clock.getInstance().now();
long now = _context.clock().now();
synchronized (_cloves) {
for (Iterator iter = _cloves.keySet().iterator(); iter.hasNext();) {
Long id = (Long)iter.next();
@ -131,7 +134,7 @@ public class HandleGarlicMessageJob extends JobImpl {
}
}
private static boolean isValid(GarlicClove clove) {
private boolean isValid(GarlicClove clove) {
if (isKnown(clove.getCloveId())) {
_log.error("Duplicate garlic clove received - replay attack in progress? [cloveId = "
+ clove.getCloveId() + " expiration = " + clove.getExpiration());
@ -140,7 +143,7 @@ public class HandleGarlicMessageJob extends JobImpl {
_log.debug("Clove " + clove.getCloveId() + " expiring on " + clove.getExpiration()
+ " is not known");
}
long now = Clock.getInstance().now();
long now = _context.clock().now();
if (clove.getExpiration().getTime() < now) {
if (clove.getExpiration().getTime() < now + Router.CLOCK_FUDGE_FACTOR) {
_log.warn("Expired garlic received, but within our fudge factor ["
@ -149,7 +152,7 @@ public class HandleGarlicMessageJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.error("Expired garlic clove received - replay attack in progress? [cloveId = "
+ clove.getCloveId() + " expiration = " + clove.getExpiration()
+ " now = " + (new Date(Clock.getInstance().now())));
+ " now = " + (new Date(_context.clock().now())));
return false;
}
}
@ -168,15 +171,15 @@ public class HandleGarlicMessageJob extends JobImpl {
}
boolean requestAck = (clove.getSourceRouteBlockAction() == GarlicClove.ACTION_STATUS);
long sendExpiration = clove.getExpiration().getTime();
MessageHandler.getInstance().handleMessage(clove.getInstructions(), clove.getData(),
requestAck, clove.getSourceRouteBlock(),
clove.getCloveId(), _from, _fromHash,
sendExpiration, FORWARD_PRIORITY);
_handler.handleMessage(clove.getInstructions(), clove.getData(),
requestAck, clove.getSourceRouteBlock(),
clove.getCloveId(), _from, _fromHash,
sendExpiration, FORWARD_PRIORITY);
}
public void dropped() {
MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Dropped due to overload");
_context.messageHistory().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Dropped due to overload");
}
}

View File

@ -26,6 +26,7 @@ import net.i2p.router.MessageHistory;
import net.i2p.router.Router;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Handle a source route reply - decrypt the instructions and forward the message
@ -33,111 +34,126 @@ import net.i2p.util.Log;
*
*/
public class HandleSourceRouteReplyMessageJob extends JobImpl {
private final static Log _log = new Log(HandleSourceRouteReplyMessageJob.class);
private Log _log;
private SourceRouteReplyMessage _message;
private RouterIdentity _from;
private Hash _fromHash;
private static Map _seenMessages; // Long msgId --> Date seen
private Map _seenMessages; // Long msgId --> Date seen
private MessageHandler _handler;
public final static int PRIORITY = 150;
public HandleSourceRouteReplyMessageJob(SourceRouteReplyMessage msg, RouterIdentity from, Hash fromHash) {
super();
_message = msg;
_from = from;
_fromHash = fromHash;
_seenMessages = new HashMap();
public HandleSourceRouteReplyMessageJob(RouterContext context, SourceRouteReplyMessage msg, RouterIdentity from, Hash fromHash) {
super(context);
_log = _context.logManager().getLog(HandleSourceRouteReplyMessageJob.class);
_message = msg;
_from = from;
_fromHash = fromHash;
_seenMessages = new HashMap();
_handler = new MessageHandler(context);
}
public String getName() { return "Handle Source Route Reply Message"; }
public void runJob() {
try {
long before = Clock.getInstance().now();
_message.decryptHeader(KeyManager.getInstance().getPrivateKey());
long after = Clock.getInstance().now();
if ( (after-before) > 1000) {
_log.warn("Took more than a second (" + (after-before) + ") to decrypt the sourceRoute header");
} else {
_log.debug("Took LESS than a second (" + (after-before) + ") to decrypt the sourceRoute header");
}
} catch (DataFormatException dfe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error decrypting the source route message's header (message " + _message.getUniqueId() + ")", dfe);
if (_log.shouldLog(Log.WARN))
_log.warn("Message header could not be decrypted: " + _message, getAddedBy());
MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Source route message header could not be decrypted");
return;
}
if (!isValid()) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error validating source route message, dropping: " + _message);
return;
}
DeliveryInstructions instructions = _message.getDecryptedInstructions();
long now = Clock.getInstance().now();
long expiration = _message.getDecryptedExpiration();
// if its expiring really soon, jack the expiration 30 seconds
if (expiration < now+10*1000)
expiration = now + 60*1000;
boolean requestAck = false;
MessageHandler.getInstance().handleMessage(instructions, _message.getMessage(), requestAck, null,
_message.getDecryptedMessageId(), _from, _fromHash, expiration, PRIORITY);
try {
long before = _context.clock().now();
_message.decryptHeader(_context.keyManager().getPrivateKey());
long after = _context.clock().now();
if ( (after-before) > 1000) {
if (_log.shouldLog(Log.WARN))
_log.warn("Took more than a second (" + (after-before)
+ ") to decrypt the sourceRoute header");
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Took LESS than a second (" + (after-before)
+ ") to decrypt the sourceRoute header");
}
} catch (DataFormatException dfe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error decrypting the source route message's header (message "
+ _message.getUniqueId() + ")", dfe);
if (_log.shouldLog(Log.WARN))
_log.warn("Message header could not be decrypted: " + _message, getAddedBy());
_context.messageHistory().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Source route message header could not be decrypted");
return;
}
if (!isValid()) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error validating source route message, dropping: " + _message);
return;
}
DeliveryInstructions instructions = _message.getDecryptedInstructions();
long now = _context.clock().now();
long expiration = _message.getDecryptedExpiration();
// if its expiring really soon, jack the expiration 30 seconds
if (expiration < now+10*1000)
expiration = now + 60*1000;
boolean requestAck = false;
_handler.handleMessage(instructions, _message.getMessage(), requestAck, null,
_message.getDecryptedMessageId(), _from, _fromHash, expiration, PRIORITY);
}
private boolean isValid() {
long now = Clock.getInstance().now();
if (_message.getDecryptedExpiration() < now) {
if (_message.getDecryptedExpiration() < now + Router.CLOCK_FUDGE_FACTOR) {
_log.info("Expired message received, but within our fudge factor");
} else {
_log.error("Source route reply message expired. Replay attack? msgId = " + _message.getDecryptedMessageId() + " expiration = " + new Date(_message.getDecryptedExpiration()));
return false;
}
}
if (!isValidMessageId(_message.getDecryptedMessageId(), _message.getDecryptedExpiration())) {
_log.error("Source route reply message already received! Replay attack? msgId = " + _message.getDecryptedMessageId() + " expiration = " + new Date(_message.getDecryptedExpiration()));
return false;
}
return true;
long now = _context.clock().now();
if (_message.getDecryptedExpiration() < now) {
if (_message.getDecryptedExpiration() < now + Router.CLOCK_FUDGE_FACTOR) {
_log.info("Expired message received, but within our fudge factor");
} else {
_log.error("Source route reply message expired. Replay attack? msgId = "
+ _message.getDecryptedMessageId() + " expiration = "
+ new Date(_message.getDecryptedExpiration()));
return false;
}
}
if (!isValidMessageId(_message.getDecryptedMessageId(), _message.getDecryptedExpiration())) {
_log.error("Source route reply message already received! Replay attack? msgId = "
+ _message.getDecryptedMessageId() + " expiration = "
+ new Date(_message.getDecryptedExpiration()));
return false;
}
return true;
}
private static boolean isValidMessageId(long msgId, long expiration) {
synchronized (_seenMessages) {
if (_seenMessages.containsKey(new Long(msgId)))
return false;
_seenMessages.put(new Long(msgId), new Date(expiration));
}
// essentially random
if ((msgId % 10) == 0) {
cleanupMessages();
}
return true;
private boolean isValidMessageId(long msgId, long expiration) {
synchronized (_seenMessages) {
if (_seenMessages.containsKey(new Long(msgId)))
return false;
_seenMessages.put(new Long(msgId), new Date(expiration));
}
// essentially random
if ((msgId % 10) == 0) {
cleanupMessages();
}
return true;
}
private static void cleanupMessages() {
// this should be in its own thread perhaps, or job? and maybe _seenMessages should be
// synced to disk?
List toRemove = new ArrayList(32);
long now = Clock.getInstance().now()-Router.CLOCK_FUDGE_FACTOR;
synchronized (_seenMessages) {
for (Iterator iter = _seenMessages.keySet().iterator(); iter.hasNext();) {
Long id = (Long)iter.next();
Date exp = (Date)_seenMessages.get(id);
if (now > exp.getTime())
toRemove.add(id);
}
for (int i = 0; i < toRemove.size(); i++)
_seenMessages.remove(toRemove.get(i));
}
private void cleanupMessages() {
// this should be in its own thread perhaps, or job? and maybe _seenMessages should be
// synced to disk?
List toRemove = new ArrayList(32);
long now = _context.clock().now()-Router.CLOCK_FUDGE_FACTOR;
synchronized (_seenMessages) {
for (Iterator iter = _seenMessages.keySet().iterator(); iter.hasNext();) {
Long id = (Long)iter.next();
Date exp = (Date)_seenMessages.get(id);
if (now > exp.getTime())
toRemove.add(id);
}
for (int i = 0; i < toRemove.size(); i++)
_seenMessages.remove(toRemove.get(i));
}
}
public void dropped() {
MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
_context.messageHistory().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Dropped due to overload");
}
}

View File

@ -45,26 +45,26 @@ import net.i2p.router.TunnelManagerFacade;
import net.i2p.stat.StatManager;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
public class HandleTunnelMessageJob extends JobImpl {
private final static Log _log = new Log(HandleTunnelMessageJob.class);
private Log _log;
private TunnelMessage _message;
private RouterIdentity _from;
private Hash _fromHash;
private final static I2NPMessageHandler _handler = new I2NPMessageHandler();
private I2NPMessageHandler _handler;
private final static long FORWARD_TIMEOUT = 60*1000;
private final static int FORWARD_PRIORITY = 400;
static {
StatManager.getInstance().createRateStat("tunnel.unknownTunnelTimeLeft", "How much time is left on tunnel messages we receive that are for unknown tunnels?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("tunnel.gatewayMessageSize", "How large are the messages we are forwarding on as an inbound gateway?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("tunnel.relayMessageSize", "How large are the messages we are forwarding on as a participant in a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("tunnel.endpointMessageSize", "How large are the messages we are forwarding in as an outbound endpoint?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
}
public HandleTunnelMessageJob(TunnelMessage msg, RouterIdentity from, Hash fromHash) {
super();
public HandleTunnelMessageJob(RouterContext ctx, TunnelMessage msg, RouterIdentity from, Hash fromHash) {
super(ctx);
_log = ctx.logManager().getLog(HandleTunnelMessageJob.class);
_handler = new I2NPMessageHandler(ctx);
ctx.statManager().createRateStat("tunnel.unknownTunnelTimeLeft", "How much time is left on tunnel messages we receive that are for unknown tunnels?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.gatewayMessageSize", "How large are the messages we are forwarding on as an inbound gateway?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.relayMessageSize", "How large are the messages we are forwarding on as a participant in a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("tunnel.endpointMessageSize", "How large are the messages we are forwarding in as an outbound endpoint?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_message = msg;
_from = from;
_fromHash = fromHash;
@ -73,18 +73,18 @@ public class HandleTunnelMessageJob extends JobImpl {
public String getName() { return "Handle Inbound Tunnel Message"; }
public void runJob() {
TunnelId id = _message.getTunnelId();
TunnelInfo info = TunnelManagerFacade.getInstance().getTunnelInfo(id);
TunnelInfo info = _context.tunnelManager().getTunnelInfo(id);
if (info == null) {
Hash from = _fromHash;
if (_from != null)
from = _from.getHash();
MessageHistory.getInstance().droppedTunnelMessage(id, from);
_context.messageHistory().droppedTunnelMessage(id, from);
if (_log.shouldLog(Log.ERROR))
_log.error("Received a message for an unknown tunnel [" + id.getTunnelId()
+ "], dropping it: " + _message, getAddedBy());
long timeRemaining = _message.getMessageExpiration().getTime() - Clock.getInstance().now();
StatManager.getInstance().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0);
long timeRemaining = _message.getMessageExpiration().getTime() - _context.clock().now();
_context.statManager().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0);
return;
}
@ -92,8 +92,8 @@ public class HandleTunnelMessageJob extends JobImpl {
if (info == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("We are not part of a known tunnel?? wtf! drop.", getAddedBy());
long timeRemaining = _message.getMessageExpiration().getTime() - Clock.getInstance().now();
StatManager.getInstance().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0);
long timeRemaining = _message.getMessageExpiration().getTime() - _context.clock().now();
_context.statManager().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0);
return;
} else {
if (_log.shouldLog(Log.DEBUG))
@ -108,7 +108,7 @@ public class HandleTunnelMessageJob extends JobImpl {
_log.debug("We are the gateway to tunnel " + id.getTunnelId());
byte data[] = _message.getData();
I2NPMessage msg = getBody(data);
JobQueue.getInstance().addJob(new HandleGatewayMessageJob(msg, info, data.length));
_context.jobQueue().addJob(new HandleGatewayMessageJob(msg, info, data.length));
return;
} else {
if (_log.shouldLog(Log.DEBUG))
@ -116,23 +116,23 @@ public class HandleTunnelMessageJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Process locally");
if (info.getDestination() != null) {
if (!ClientManagerFacade.getInstance().isLocal(info.getDestination())) {
if (!_context.clientManager().isLocal(info.getDestination())) {
if (_log.shouldLog(Log.WARN))
_log.warn("Received a message on a tunnel allocated to a client that has disconnected - dropping it!");
if (_log.shouldLog(Log.DEBUG))
_log.debug("Dropping message for disconnected client: " + _message);
MessageHistory.getInstance().droppedOtherMessage(_message);
MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Disconnected client");
_context.messageHistory().droppedOtherMessage(_message);
_context.messageHistory().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Disconnected client");
return;
}
}
I2NPMessage body = getBody(_message.getData());
if (body != null) {
JobQueue.getInstance().addJob(new HandleLocallyJob(body, info));
_context.jobQueue().addJob(new HandleLocallyJob(body, info));
return;
} else {
if (_log.shouldLog(Log.ERROR))
@ -152,7 +152,7 @@ public class HandleTunnelMessageJob extends JobImpl {
} else {
// participant
TunnelVerificationStructure struct = _message.getVerificationStructure();
boolean ok = struct.verifySignature(info.getVerificationKey().getKey());
boolean ok = struct.verifySignature(_context, info.getVerificationKey().getKey());
if (!ok) {
if (_log.shouldLog(Log.WARN))
_log.warn("Failed tunnel verification! Spoofing / tagging attack? " + _message, getAddedBy());
@ -164,16 +164,18 @@ public class HandleTunnelMessageJob extends JobImpl {
+ " received where we're not the gateway and there are remaining hops, so forward it on to "
+ info.getNextHop().toBase64() + " via SendTunnelMessageJob");
StatManager.getInstance().addRateData("tunnel.relayMessageSize",
_message.getData().length, 0);
_context.statManager().addRateData("tunnel.relayMessageSize",
_message.getData().length, 0);
JobQueue.getInstance().addJob(new SendMessageDirectJob(_message, info.getNextHop(),
Clock.getInstance().now() + FORWARD_TIMEOUT, FORWARD_PRIORITY));
_context.jobQueue().addJob(new SendMessageDirectJob(_context, _message,
info.getNextHop(),
_context.clock().now() + FORWARD_TIMEOUT,
FORWARD_PRIORITY));
return;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("No more hops, unwrap and follow the instructions");
JobQueue.getInstance().addJob(new HandleEndpointJob(info));
_context.jobQueue().addJob(new HandleEndpointJob(info));
return;
}
}
@ -210,20 +212,20 @@ public class HandleTunnelMessageJob extends JobImpl {
_log.error("Unable to recover the body from the tunnel", getAddedBy());
return;
} else {
JobQueue.getInstance().addJob(new ProcessBodyLocallyJob(body, instructions, ourPlace));
_context.jobQueue().addJob(new ProcessBodyLocallyJob(body, instructions, ourPlace));
}
}
}
private void honorInstructions(DeliveryInstructions instructions, I2NPMessage body) {
StatManager.getInstance().addRateData("tunnel.endpointMessageSize", _message.getData().length, 0);
_context.statManager().addRateData("tunnel.endpointMessageSize", _message.getData().length, 0);
switch (instructions.getDeliveryMode()) {
case DeliveryInstructions.DELIVERY_MODE_LOCAL:
sendToLocal(body);
break;
case DeliveryInstructions.DELIVERY_MODE_ROUTER:
if (Router.getInstance().getRouterInfo().getIdentity().getHash().equals(instructions.getRouter())) {
if (_context.routerHash().equals(instructions.getRouter())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Delivery instructions point at a router, but we're that router, so send to local");
sendToLocal(body);
@ -244,7 +246,7 @@ public class HandleTunnelMessageJob extends JobImpl {
private void sendToDest(Hash dest, I2NPMessage body) {
if (body instanceof DataMessage) {
boolean isLocal = ClientManagerFacade.getInstance().isLocal(dest);
boolean isLocal = _context.clientManager().isLocal(dest);
if (isLocal) {
deliverMessage(null, dest, (DataMessage)body);
return;
@ -265,17 +267,17 @@ public class HandleTunnelMessageJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending on to requested tunnel " + id.getTunnelId() + " on router "
+ router.toBase64());
TunnelMessage msg = new TunnelMessage();
TunnelMessage msg = new TunnelMessage(_context);
msg.setTunnelId(id);
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
body.writeBytes(baos);
msg.setData(baos.toByteArray());
long exp = Clock.getInstance().now() + FORWARD_TIMEOUT;
JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, router, exp, FORWARD_PRIORITY));
long exp = _context.clock().now() + FORWARD_TIMEOUT;
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, router, exp, FORWARD_PRIORITY));
String bodyType = body.getClass().getName();
MessageHistory.getInstance().wrap(bodyType, body.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
_context.messageHistory().wrap(bodyType, body.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
} catch (DataFormatException dfe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the message to forward to the tunnel", dfe);
@ -289,8 +291,8 @@ public class HandleTunnelMessageJob extends JobImpl {
// TODO: we may want to send it via a tunnel later on, but for now, direct will do.
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending on to requested router " + router.toBase64());
long exp = Clock.getInstance().now() + FORWARD_TIMEOUT;
JobQueue.getInstance().addJob(new SendMessageDirectJob(body, router, exp, FORWARD_PRIORITY));
long exp = _context.clock().now() + FORWARD_TIMEOUT;
_context.jobQueue().addJob(new SendMessageDirectJob(_context, body, router, exp, FORWARD_PRIORITY));
}
private void sendToLocal(I2NPMessage body) {
@ -298,18 +300,18 @@ public class HandleTunnelMessageJob extends JobImpl {
msg.setMessage(body);
msg.setFromRouter(_from);
msg.setFromRouterHash(_fromHash);
InNetMessagePool.getInstance().add(msg);
_context.inNetMessagePool().add(msg);
}
private void deliverMessage(Destination dest, Hash destHash, DataMessage msg) {
boolean valid = MessageValidator.getInstance().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime());
boolean valid = _context.messageValidator().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime());
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate data message received [" + msg.getUniqueId()
+ " expiring on " + msg.getMessageExpiration() + "]");
MessageHistory.getInstance().droppedOtherMessage(msg);
MessageHistory.getInstance().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(),
"Duplicate payload");
_context.messageHistory().droppedOtherMessage(msg);
_context.messageHistory().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(),
"Duplicate payload");
return;
}
@ -327,9 +329,9 @@ public class HandleTunnelMessageJob extends JobImpl {
cmsg.setPayload(payload);
cmsg.setReceptionInfo(info);
MessageHistory.getInstance().receivePayloadMessage(msg.getUniqueId());
_context.messageHistory().receivePayloadMessage(msg.getUniqueId());
// if the destination isn't local, the ClientMessagePool forwards it off as an OutboundClientMessageJob
ClientMessagePool.getInstance().add(cmsg);
_context.clientMessagePool().add(cmsg);
}
private I2NPMessage getBody(byte body[]) {
@ -347,9 +349,9 @@ public class HandleTunnelMessageJob extends JobImpl {
private I2NPMessage decryptBody(byte encryptedMessage[], SessionKey key) {
byte iv[] = new byte[16];
Hash h = SHA256Generator.getInstance().calculateHash(key.getData());
Hash h = _context.sha().calculateHash(key.getData());
System.arraycopy(h.getData(), 0, iv, 0, iv.length);
byte decrypted[] = AESEngine.getInstance().safeDecrypt(encryptedMessage, key, iv);
byte decrypted[] = _context.AESEngine().safeDecrypt(encryptedMessage, key, iv);
if (decrypted == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error decrypting the message", getAddedBy());
@ -361,9 +363,9 @@ public class HandleTunnelMessageJob extends JobImpl {
private DeliveryInstructions getInstructions(byte encryptedInstructions[], SessionKey key) {
try {
byte iv[] = new byte[16];
Hash h = SHA256Generator.getInstance().calculateHash(key.getData());
Hash h = _context.sha().calculateHash(key.getData());
System.arraycopy(h.getData(), 0, iv, 0, iv.length);
byte decrypted[] = AESEngine.getInstance().safeDecrypt(encryptedInstructions, key, iv);
byte decrypted[] = _context.AESEngine().safeDecrypt(encryptedInstructions, key, iv);
if (decrypted == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error decrypting the instructions", getAddedBy());
@ -383,7 +385,7 @@ public class HandleTunnelMessageJob extends JobImpl {
}
private TunnelInfo getUs(TunnelInfo info) {
Hash us = Router.getInstance().getRouterInfo().getIdentity().getHash();
Hash us = _context.routerHash();
while (info != null) {
if (us.equals(info.getThisHop()))
return info;
@ -406,7 +408,7 @@ public class HandleTunnelMessageJob extends JobImpl {
return false;
}
if (!vstruct.verifySignature(info.getVerificationKey().getKey())) {
if (!vstruct.verifySignature(_context, info.getVerificationKey().getKey())) {
if (_log.shouldLog(Log.ERROR))
_log.error("Received a tunnel message with an invalid signature!");
// shitlist the sender?
@ -414,7 +416,7 @@ public class HandleTunnelMessageJob extends JobImpl {
}
// now validate the message
Hash msgHash = SHA256Generator.getInstance().calculateHash(_message.getData());
Hash msgHash = _context.sha().calculateHash(_message.getData());
if (msgHash.equals(vstruct.getMessageHash())) {
// hash matches. good.
return true;
@ -427,8 +429,8 @@ public class HandleTunnelMessageJob extends JobImpl {
}
public void dropped() {
MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(),
"Dropped due to overload");
_context.messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(),
"Dropped due to overload");
}
////
@ -442,17 +444,19 @@ public class HandleTunnelMessageJob extends JobImpl {
private TunnelInfo _info;
public HandleGatewayMessageJob(I2NPMessage body, TunnelInfo tunnel, int length) {
super(HandleTunnelMessageJob.this._context);
_body = body;
_length = length;
_info = tunnel;
}
public void runJob() {
RouterContext ctx = HandleTunnelMessageJob.this._context;
if (_body != null) {
StatManager.getInstance().addRateData("tunnel.gatewayMessageSize", _length, 0);
ctx.statManager().addRateData("tunnel.gatewayMessageSize", _length, 0);
if (_log.shouldLog(Log.INFO))
_log.info("Message for tunnel " + _info.getTunnelId() + " received at the gateway (us), and since its > 0 length, forward the "
+ _body.getClass().getName() + " message on to " + _info.getNextHop().toBase64() + " via SendTunnelMessageJob");
JobQueue.getInstance().addJob(new SendTunnelMessageJob(_body, _info.getTunnelId(), null, null, null, null, FORWARD_TIMEOUT, FORWARD_PRIORITY));
ctx.jobQueue().addJob(new SendTunnelMessageJob(ctx, _body, _info.getTunnelId(), null, null, null, null, FORWARD_TIMEOUT, FORWARD_PRIORITY));
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Body of the message for the tunnel could not be parsed");
@ -469,6 +473,7 @@ public class HandleTunnelMessageJob extends JobImpl {
private TunnelInfo _info;
public HandleLocallyJob(I2NPMessage body, TunnelInfo tunnel) {
super(HandleTunnelMessageJob.this._context);
_body = body;
_info = tunnel;
}
@ -491,7 +496,7 @@ public class HandleTunnelMessageJob extends JobImpl {
msg.setFromRouter(_from);
msg.setFromRouterHash(_fromHash);
msg.setMessage(_body);
InNetMessagePool.getInstance().add(msg);
HandleLocallyJob.this._context.inNetMessagePool().add(msg);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Message added to Inbound network pool for local processing: " + _message);
}
@ -503,6 +508,7 @@ public class HandleTunnelMessageJob extends JobImpl {
private class HandleEndpointJob extends JobImpl {
private TunnelInfo _info;
public HandleEndpointJob(TunnelInfo info) {
super(HandleTunnelMessageJob.this._context);
_info = info;
}
public void runJob() {
@ -517,6 +523,7 @@ public class HandleTunnelMessageJob extends JobImpl {
private TunnelInfo _ourPlace;
private DeliveryInstructions _instructions;
public ProcessBodyLocallyJob(I2NPMessage body, DeliveryInstructions instructions, TunnelInfo ourPlace) {
super(HandleTunnelMessageJob.this._context);
_body = body;
_instructions = instructions;
_ourPlace = ourPlace;

View File

@ -32,6 +32,7 @@ import net.i2p.router.MessageValidator;
import net.i2p.router.Router;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Implement the inbound message processing logic to forward based on delivery instructions and
@ -39,141 +40,165 @@ import net.i2p.util.Log;
*
*/
class MessageHandler {
private final static Log _log = new Log(MessageHandler.class);
private static MessageHandler _instance = new MessageHandler();
public static MessageHandler getInstance() { return _instance; }
private Log _log;
private RouterContext _context;
public MessageHandler(RouterContext ctx) {
_context = ctx;
_log = _context.logManager().getLog(MessageHandler.class);
}
public void handleMessage(DeliveryInstructions instructions, I2NPMessage message, boolean requestAck, SourceRouteBlock replyBlock,
long replyId, RouterIdentity from, Hash fromHash, long expiration, int priority) {
switch (instructions.getDeliveryMode()) {
case DeliveryInstructions.DELIVERY_MODE_LOCAL:
_log.debug("Instructions for LOCAL DELIVERY");
if (message.getType() == DataMessage.MESSAGE_TYPE) {
handleLocalDestination(instructions, message, fromHash);
} else {
handleLocalRouter(message, from, fromHash, replyBlock, requestAck);
}
break;
case DeliveryInstructions.DELIVERY_MODE_ROUTER:
_log.debug("Instructions for ROUTER DELIVERY to " + instructions.getRouter().toBase64());
if (Router.getInstance().getRouterInfo().getIdentity().getHash().equals(instructions.getRouter())) {
handleLocalRouter(message, from, fromHash, replyBlock, requestAck);
} else {
handleRemoteRouter(message, instructions, expiration, priority);
}
break;
case DeliveryInstructions.DELIVERY_MODE_DESTINATION:
_log.debug("Instructions for DESTINATION DELIVERY to " + instructions.getDestination().toBase64());
if (ClientManagerFacade.getInstance().isLocal(instructions.getDestination())) {
handleLocalDestination(instructions, message, fromHash);
} else {
_log.error("Instructions requests forwarding on to a non-local destination. Not yet supported");
}
break;
case DeliveryInstructions.DELIVERY_MODE_TUNNEL:
_log.debug("Instructions for TUNNEL DELIVERY to" + instructions.getTunnelId().getTunnelId() + " on " + instructions.getRouter().toBase64());
handleTunnel(instructions, expiration, message, priority);
break;
default:
_log.error("Message has instructions that are not yet implemented: mode = " + instructions.getDeliveryMode());
}
if (requestAck) {
_log.debug("SEND ACK REQUESTED");
sendAck(replyBlock, replyId);
} else {
_log.debug("No ack requested");
}
public void handleMessage(DeliveryInstructions instructions, I2NPMessage message,
boolean requestAck, SourceRouteBlock replyBlock,
long replyId, RouterIdentity from, Hash fromHash,
long expiration, int priority) {
switch (instructions.getDeliveryMode()) {
case DeliveryInstructions.DELIVERY_MODE_LOCAL:
_log.debug("Instructions for LOCAL DELIVERY");
if (message.getType() == DataMessage.MESSAGE_TYPE) {
handleLocalDestination(instructions, message, fromHash);
} else {
handleLocalRouter(message, from, fromHash, replyBlock, requestAck);
}
break;
case DeliveryInstructions.DELIVERY_MODE_ROUTER:
if (_log.shouldLog(Log.DEBUG))
_log.debug("Instructions for ROUTER DELIVERY to "
+ instructions.getRouter().toBase64());
if (_context.routerHash().equals(instructions.getRouter())) {
handleLocalRouter(message, from, fromHash, replyBlock, requestAck);
} else {
handleRemoteRouter(message, instructions, expiration, priority);
}
break;
case DeliveryInstructions.DELIVERY_MODE_DESTINATION:
if (_log.shouldLog(Log.DEBUG))
_log.debug("Instructions for DESTINATION DELIVERY to "
+ instructions.getDestination().toBase64());
if (_context.clientManager().isLocal(instructions.getDestination())) {
handleLocalDestination(instructions, message, fromHash);
} else {
_log.error("Instructions requests forwarding on to a non-local destination. Not yet supported");
}
break;
case DeliveryInstructions.DELIVERY_MODE_TUNNEL:
if (_log.shouldLog(Log.DEBUG))
_log.debug("Instructions for TUNNEL DELIVERY to"
+ instructions.getTunnelId().getTunnelId() + " on "
+ instructions.getRouter().toBase64());
handleTunnel(instructions, expiration, message, priority);
break;
default:
_log.error("Message has instructions that are not yet implemented: mode = " + instructions.getDeliveryMode());
}
if (requestAck) {
_log.debug("SEND ACK REQUESTED");
sendAck(replyBlock, replyId);
} else {
_log.debug("No ack requested");
}
}
private void sendAck(SourceRouteBlock replyBlock, long replyId) {
_log.info("Queueing up ack job via reply block " + replyBlock);
Job ackJob = new SendMessageAckJob(replyBlock, replyId);
JobQueue.getInstance().addJob(ackJob);
_log.info("Queueing up ack job via reply block " + replyBlock);
Job ackJob = new SendMessageAckJob(_context, replyBlock, replyId);
_context.jobQueue().addJob(ackJob);
}
private void handleLocalRouter(I2NPMessage message, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock, boolean ackUsed) {
_log.info("Handle " + message.getClass().getName() + " to a local router - toss it on the inbound network pool");
InNetMessage msg = new InNetMessage();
msg.setFromRouter(from);
msg.setFromRouterHash(fromHash);
msg.setMessage(message);
if (!ackUsed)
msg.setReplyBlock(replyBlock);
InNetMessagePool.getInstance().add(msg);
_log.info("Handle " + message.getClass().getName() + " to a local router - toss it on the inbound network pool");
InNetMessage msg = new InNetMessage();
msg.setFromRouter(from);
msg.setFromRouterHash(fromHash);
msg.setMessage(message);
if (!ackUsed)
msg.setReplyBlock(replyBlock);
_context.inNetMessagePool().add(msg);
}
private void handleRemoteRouter(I2NPMessage message, DeliveryInstructions instructions, long expiration, int priority) {
private void handleRemoteRouter(I2NPMessage message, DeliveryInstructions instructions,
long expiration, int priority) {
boolean valid = _context.messageValidator().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime());
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate / expired message received to remote router [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]");
_context.messageHistory().droppedOtherMessage(message);
_context.messageHistory().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired to remote router");
return;
}
boolean valid = MessageValidator.getInstance().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime());
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate / expired message received to remote router [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]");
MessageHistory.getInstance().droppedOtherMessage(message);
MessageHistory.getInstance().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired to remote router");
return;
}
_log.info("Handle " + message.getClass().getName() + " to a remote router " + instructions.getRouter().toBase64() + " - fire a SendMessageDirectJob");
SendMessageDirectJob j = new SendMessageDirectJob(message, instructions.getRouter(), expiration, priority);
JobQueue.getInstance().addJob(j);
if (_log.shouldLog(Log.INFO))
_log.info("Handle " + message.getClass().getName() + " to a remote router "
+ instructions.getRouter().toBase64() + " - fire a SendMessageDirectJob");
SendMessageDirectJob j = new SendMessageDirectJob(_context, message, instructions.getRouter(), expiration, priority);
_context.jobQueue().addJob(j);
}
private void handleTunnel(DeliveryInstructions instructions, long expiration, I2NPMessage message, int priority) {
Hash to = instructions.getRouter();
long timeoutMs = expiration - Clock.getInstance().now();
TunnelId tunnelId = instructions.getTunnelId();
if (!Router.getInstance().getRouterInfo().getIdentity().getHash().equals(to)) {
// don't validate locally targetted tunnel messages, since then we'd have to tweak
// around message validation thats already in place for SendMessageDirectJob
boolean valid = MessageValidator.getInstance().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime());
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate / expired tunnel message received [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]");
MessageHistory.getInstance().droppedOtherMessage(message);
MessageHistory.getInstance().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired");
return;
}
}
Hash to = instructions.getRouter();
long timeoutMs = expiration - _context.clock().now();
TunnelId tunnelId = instructions.getTunnelId();
if (!_context.routerHash().equals(to)) {
// don't validate locally targetted tunnel messages, since then we'd have to tweak
// around message validation thats already in place for SendMessageDirectJob
boolean valid = _context.messageValidator().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime());
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate / expired tunnel message received [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]");
_context.messageHistory().droppedOtherMessage(message);
_context.messageHistory().messageProcessingError(message.getUniqueId(),
message.getClass().getName(),
"Duplicate/expired");
return;
}
}
_log.info("Handle " + message.getClass().getName() + " to send to remote tunnel " + tunnelId.getTunnelId() + " on router " + to.toBase64());
TunnelMessage msg = new TunnelMessage();
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
try {
message.writeBytes(baos);
msg.setData(baos.toByteArray());
msg.setTunnelId(tunnelId);
_log.debug("Placing message of type " + message.getClass().getName() + " into the new tunnel message bound for " + tunnelId.getTunnelId() + " on " + to.toBase64());
JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, to, expiration, priority));
String bodyType = message.getClass().getName();
MessageHistory.getInstance().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
} catch (Exception e) {
_log.warn("Unable to forward on according to the instructions to the remote tunnel", e);
}
if (_log.shouldLog(Log.INFO))
_log.info("Handle " + message.getClass().getName() + " to send to remote tunnel "
+ tunnelId.getTunnelId() + " on router " + to.toBase64());
TunnelMessage msg = new TunnelMessage(_context);
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
try {
message.writeBytes(baos);
msg.setData(baos.toByteArray());
msg.setTunnelId(tunnelId);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Placing message of type " + message.getClass().getName()
+ " into the new tunnel message bound for " + tunnelId.getTunnelId()
+ " on " + to.toBase64());
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, to, expiration, priority));
String bodyType = message.getClass().getName();
_context.messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
} catch (Exception e) {
_log.warn("Unable to forward on according to the instructions to the remote tunnel", e);
}
}
private void handleLocalDestination(DeliveryInstructions instructions, I2NPMessage message, Hash fromHash) {
boolean valid = MessageValidator.getInstance().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime());
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate / expired client message received [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]");
MessageHistory.getInstance().droppedOtherMessage(message);
MessageHistory.getInstance().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired client message");
return;
}
_log.debug("Handle " + message.getClass().getName() + " to a local destination - build a ClientMessage and pool it");
ClientMessage msg = new ClientMessage();
msg.setDestinationHash(instructions.getDestination());
Payload payload = new Payload();
payload.setEncryptedData(((DataMessage)message).getData());
msg.setPayload(payload);
MessageReceptionInfo info = new MessageReceptionInfo();
info.setFromPeer(fromHash);
msg.setReceptionInfo(info);
MessageHistory.getInstance().receivePayloadMessage(message.getUniqueId());
ClientMessagePool.getInstance().add(msg);
boolean valid = _context.messageValidator().validateMessage(message.getUniqueId(), message.getMessageExpiration().getTime());
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate / expired client message received [" + message.getUniqueId() + " expiring on " + message.getMessageExpiration() + "]");
_context.messageHistory().droppedOtherMessage(message);
_context.messageHistory().messageProcessingError(message.getUniqueId(), message.getClass().getName(), "Duplicate/expired client message");
return;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Handle " + message.getClass().getName()
+ " to a local destination - build a ClientMessage and pool it");
ClientMessage msg = new ClientMessage();
msg.setDestinationHash(instructions.getDestination());
Payload payload = new Payload();
payload.setEncryptedData(((DataMessage)message).getData());
msg.setPayload(payload);
MessageReceptionInfo info = new MessageReceptionInfo();
info.setFromPeer(fromHash);
msg.setReceptionInfo(info);
_context.messageHistory().receivePayloadMessage(message.getUniqueId());
_context.clientMessagePool().add(msg);
}
}

View File

@ -39,30 +39,31 @@ import net.i2p.stat.StatManager;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.router.RouterContext;
/**
* Send a client message, taking into consideration the fact that there may be
* multiple inbound tunnels that the target provides. This job sends it to one
* of them and if it doesnt get a confirmation within 15 seconds (SEND_TIMEOUT_MS),
* it tries the next, continuing on until a confirmation is received, the full
* it tries the next, continuing on until a confirmation is received, the full
* timeout has been reached (60 seconds, or the ms defined in the client's or
* router's "clientMessageTimeout" option).
* router's "clientMessageTimeout" option).
*
* After sending through all of the leases without success, if there's still
* time left it fails the leaseSet itself, does a new search for that leaseSet,
* and continues sending down any newly found leases.
* After sending through all of the leases without success, if there's still
* time left it fails the leaseSet itself, does a new search for that leaseSet,
* and continues sending down any newly found leases.
*
*/
public class OutboundClientMessageJob extends JobImpl {
private final static Log _log = new Log(OutboundClientMessageJob.class);
private Log _log;
private OutboundClientMessageStatus _status;
private NextStepJob _nextStep;
private LookupLeaseSetFailedJob _lookupLeaseSetFailed;
private long _overallExpiration;
/**
/**
* final timeout (in milliseconds) that the outbound message will fail in.
* This can be overridden in the router.config or the client's session config
* This can be overridden in the router.config or the client's session config
* (the client's session config takes precedence)
*/
public final static String OVERALL_TIMEOUT_MS_PARAM = "clientMessageTimeout";
@ -76,286 +77,285 @@ public class OutboundClientMessageJob extends JobImpl {
/** dont search for the lease more than 3 times */
private final static int MAX_LEASE_LOOKUPS = 3;
static {
StatManager.getInstance().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("client.sendAttemptAverage", "How many different tunnels do we have to try when sending a client message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
}
/**
* Send the sucker
*/
public OutboundClientMessageJob(ClientMessage msg) {
super();
long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;
String param = msg.getSenderConfig().getOptions().getProperty(OVERALL_TIMEOUT_MS_PARAM);
if (param == null)
param = Router.getInstance().getConfigSetting(OVERALL_TIMEOUT_MS_PARAM);
if (param != null) {
try {
timeoutMs = Long.parseLong(param);
} catch (NumberFormatException nfe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Invalid client message timeout specified [" + param + "], defaulting to " + OVERALL_TIMEOUT_MS_DEFAULT, nfe);
timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;
}
}
_overallExpiration = timeoutMs + Clock.getInstance().now();
_status = new OutboundClientMessageStatus(msg);
_nextStep = new NextStepJob();
_lookupLeaseSetFailed = new LookupLeaseSetFailedJob();
public OutboundClientMessageJob(RouterContext ctx, ClientMessage msg) {
super(ctx);
_log = ctx.logManager().getLog(OutboundClientMessageJob.class);
ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAttemptAverage", "How many different tunnels do we have to try when sending a client message?", "Client Messages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;
String param = msg.getSenderConfig().getOptions().getProperty(OVERALL_TIMEOUT_MS_PARAM);
if (param == null)
param = ctx.router().getConfigSetting(OVERALL_TIMEOUT_MS_PARAM);
if (param != null) {
try {
timeoutMs = Long.parseLong(param);
} catch (NumberFormatException nfe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Invalid client message timeout specified [" + param + "], defaulting to " + OVERALL_TIMEOUT_MS_DEFAULT, nfe);
timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;
}
}
_overallExpiration = timeoutMs + _context.clock().now();
_status = new OutboundClientMessageStatus(msg);
_nextStep = new NextStepJob();
_lookupLeaseSetFailed = new LookupLeaseSetFailedJob();
}
public String getName() { return "Outbound client message"; }
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send outbound client message job beginning");
buildClove();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Clove built");
Hash to = _status.getTo().calculateHash();
long timeoutMs = _overallExpiration - Clock.getInstance().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send outbound client message - sending off leaseSet lookup job");
_status.incrementLookups();
NetworkDatabaseFacade.getInstance().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, timeoutMs);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send outbound client message job beginning");
buildClove();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Clove built");
Hash to = _status.getTo().calculateHash();
long timeoutMs = _overallExpiration - _context.clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send outbound client message - sending off leaseSet lookup job");
_status.incrementLookups();
_context.netDb().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, timeoutMs);
}
/**
* Continue on sending through the next tunnel
*/
private void sendNext() {
if (_log.shouldLog(Log.DEBUG)) {
_log.debug("sendNext() called with " + _status.getNumSent() + " already sent");
}
if (_status.getSuccess()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("sendNext() - already successful!");
return;
}
if (_status.getFailure()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("sendNext() - already failed!");
return;
}
long now = Clock.getInstance().now();
if (now >= _overallExpiration) {
if (_log.shouldLog(Log.WARN))
_log.warn("sendNext() - Expired");
dieFatal();
return;
}
Lease nextLease = getNextLease();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send outbound client message - next lease found for [" + _status.getTo().calculateHash().toBase64() + "] - " + nextLease);
if (nextLease == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No more leases, and we still haven't heard back from the peer, refetching the leaseSet to try again");
_status.setLeaseSet(null);
long remainingMs = _overallExpiration - Clock.getInstance().now();
if (_status.getNumLookups() < MAX_LEASE_LOOKUPS) {
_status.incrementLookups();
Hash to = _status.getMessage().getDestination().calculateHash();
_status.clearAlreadySent();
NetworkDatabaseFacade.getInstance().fail(to);
NetworkDatabaseFacade.getInstance().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, remainingMs);
return;
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("sendNext() - max # lease lookups exceeded! " + _status.getNumLookups());
dieFatal();
return;
}
}
JobQueue.getInstance().addJob(new SendJob(nextLease));
if (_log.shouldLog(Log.DEBUG)) {
_log.debug("sendNext() called with " + _status.getNumSent() + " already sent");
}
if (_status.getSuccess()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("sendNext() - already successful!");
return;
}
if (_status.getFailure()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("sendNext() - already failed!");
return;
}
long now = _context.clock().now();
if (now >= _overallExpiration) {
if (_log.shouldLog(Log.WARN))
_log.warn("sendNext() - Expired");
dieFatal();
return;
}
Lease nextLease = getNextLease();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send outbound client message - next lease found for [" + _status.getTo().calculateHash().toBase64() + "] - " + nextLease);
if (nextLease == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No more leases, and we still haven't heard back from the peer, refetching the leaseSet to try again");
_status.setLeaseSet(null);
long remainingMs = _overallExpiration - _context.clock().now();
if (_status.getNumLookups() < MAX_LEASE_LOOKUPS) {
_status.incrementLookups();
Hash to = _status.getMessage().getDestination().calculateHash();
_status.clearAlreadySent();
_context.netDb().fail(to);
_context.netDb().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, remainingMs);
return;
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("sendNext() - max # lease lookups exceeded! " + _status.getNumLookups());
dieFatal();
return;
}
}
_context.jobQueue().addJob(new SendJob(nextLease));
}
/**
* fetch the next lease that we should try sending through, or null if there
* are no remaining leases available (or there weren't any in the first place...).
* This implements the logic to determine which lease should be next by picking a
/**
* fetch the next lease that we should try sending through, or null if there
* are no remaining leases available (or there weren't any in the first place...).
* This implements the logic to determine which lease should be next by picking a
* random one that has been failing the least (e.g. if there are 3 leases in the leaseSet
* and one has failed, the other two are randomly chosen as the 'next')
*
*/
private Lease getNextLease() {
LeaseSet ls = _status.getLeaseSet();
if (ls == null) {
ls = NetworkDatabaseFacade.getInstance().lookupLeaseSetLocally(_status.getTo().calculateHash());
if (ls == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Lookup locally didn't find the leaseSet");
return null;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Lookup locally DID find the leaseSet");
}
_status.setLeaseSet(ls);
}
long now = Clock.getInstance().now();
// get the possible leases
List leases = new ArrayList(4);
for (int i = 0; i < ls.getLeaseCount(); i++) {
Lease lease = ls.getLease(i);
if (lease.isExpired(Router.CLOCK_FUDGE_FACTOR)) {
if (_log.shouldLog(Log.WARN))
_log.warn("getNextLease() - expired lease! - " + lease);
continue;
}
if (!_status.alreadySent(lease.getRouterIdentity().getHash(), lease.getTunnelId())) {
leases.add(lease);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("getNextLease() - skipping lease we've already sent it down - " + lease);
}
}
// randomize the ordering (so leases with equal # of failures per next sort are randomly ordered)
Collections.shuffle(leases);
// ordered by lease number of failures
TreeMap orderedLeases = new TreeMap();
for (Iterator iter = leases.iterator(); iter.hasNext(); ) {
Lease lease = (Lease)iter.next();
long id = lease.getNumFailure();
while (orderedLeases.containsKey(new Long(id)))
id++;
orderedLeases.put(new Long(id), lease);
if (_log.shouldLog(Log.DEBUG))
_log.debug("getNextLease() - ranking lease we havent sent it down as " + id);
}
if (orderedLeases.size() <= 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("No leases in the ordered set found! all = " + leases.size());
return null;
} else {
return (Lease)orderedLeases.get(orderedLeases.firstKey());
}
private Lease getNextLease() {
LeaseSet ls = _status.getLeaseSet();
if (ls == null) {
ls = _context.netDb().lookupLeaseSetLocally(_status.getTo().calculateHash());
if (ls == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Lookup locally didn't find the leaseSet");
return null;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Lookup locally DID find the leaseSet");
}
_status.setLeaseSet(ls);
}
long now = _context.clock().now();
// get the possible leases
List leases = new ArrayList(4);
for (int i = 0; i < ls.getLeaseCount(); i++) {
Lease lease = ls.getLease(i);
if (lease.isExpired(Router.CLOCK_FUDGE_FACTOR)) {
if (_log.shouldLog(Log.WARN))
_log.warn("getNextLease() - expired lease! - " + lease);
continue;
}
if (!_status.alreadySent(lease.getRouterIdentity().getHash(), lease.getTunnelId())) {
leases.add(lease);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("getNextLease() - skipping lease we've already sent it down - " + lease);
}
}
// randomize the ordering (so leases with equal # of failures per next sort are randomly ordered)
Collections.shuffle(leases);
// ordered by lease number of failures
TreeMap orderedLeases = new TreeMap();
for (Iterator iter = leases.iterator(); iter.hasNext(); ) {
Lease lease = (Lease)iter.next();
long id = lease.getNumFailure();
while (orderedLeases.containsKey(new Long(id)))
id++;
orderedLeases.put(new Long(id), lease);
if (_log.shouldLog(Log.DEBUG))
_log.debug("getNextLease() - ranking lease we havent sent it down as " + id);
}
if (orderedLeases.size() <= 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("No leases in the ordered set found! all = " + leases.size());
return null;
} else {
return (Lease)orderedLeases.get(orderedLeases.firstKey());
}
}
/**
* Send the message to the specified tunnel by creating a new garlic message containing
* the (already created) payload clove as well as a new delivery status message. This garlic
* the (already created) payload clove as well as a new delivery status message. This garlic
* message is sent out one of our tunnels, destined for the lease (tunnel+router) specified, and the delivery
* status message is targetting one of our free inbound tunnels as well. We use a new
* status message is targetting one of our free inbound tunnels as well. We use a new
* reply selector to keep an eye out for that delivery status message's token
*
*/
private void send(Lease lease) {
// send it as a garlic with a DeliveryStatusMessage clove and a message selector w/ successJob on reply
long token = RandomSource.getInstance().nextInt(Integer.MAX_VALUE);
PublicKey key = _status.getLeaseSet().getEncryptionKey();
SessionKey sessKey = new SessionKey();
Set tags = new HashSet();
GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(token, _overallExpiration, key, _status.getClove(), _status.getTo(), sessKey, tags, true);
if (_log.shouldLog(Log.DEBUG))
_log.debug("send(lease) - token expected " + token);
_status.sent(lease.getRouterIdentity().getHash(), lease.getTunnelId());
SendSuccessJob onReply = new SendSuccessJob(lease, sessKey, tags);
SendTimeoutJob onFail = new SendTimeoutJob(lease);
ReplySelector selector = new ReplySelector(token);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Placing GarlicMessage into the new tunnel message bound for " + lease.getTunnelId() + " on " + lease.getRouterIdentity().getHash().toBase64());
TunnelId outTunnelId = selectOutboundTunnel();
if (outTunnelId != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending tunnel message out " + outTunnelId + " to " + lease.getTunnelId() + " on " + lease.getRouterIdentity().getHash().toBase64());
SendTunnelMessageJob j = new SendTunnelMessageJob(msg, outTunnelId, lease.getRouterIdentity().getHash(), lease.getTunnelId(), null, onReply, onFail, selector, SEND_TIMEOUT_MS, SEND_PRIORITY);
JobQueue.getInstance().addJob(j);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Could not find any outbound tunnels to send the payload through... wtf?");
JobQueue.getInstance().addJob(onFail);
}
// send it as a garlic with a DeliveryStatusMessage clove and a message selector w/ successJob on reply
long token = _context.random().nextInt(Integer.MAX_VALUE);
PublicKey key = _status.getLeaseSet().getEncryptionKey();
SessionKey sessKey = new SessionKey();
Set tags = new HashSet();
GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(_context, token, _overallExpiration, key, _status.getClove(), _status.getTo(), sessKey, tags, true);
if (_log.shouldLog(Log.DEBUG))
_log.debug("send(lease) - token expected " + token);
_status.sent(lease.getRouterIdentity().getHash(), lease.getTunnelId());
SendSuccessJob onReply = new SendSuccessJob(lease, sessKey, tags);
SendTimeoutJob onFail = new SendTimeoutJob(lease);
ReplySelector selector = new ReplySelector(token);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Placing GarlicMessage into the new tunnel message bound for " + lease.getTunnelId() + " on " + lease.getRouterIdentity().getHash().toBase64());
TunnelId outTunnelId = selectOutboundTunnel();
if (outTunnelId != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending tunnel message out " + outTunnelId + " to " + lease.getTunnelId() + " on " + lease.getRouterIdentity().getHash().toBase64());
SendTunnelMessageJob j = new SendTunnelMessageJob(_context, msg, outTunnelId, lease.getRouterIdentity().getHash(), lease.getTunnelId(), null, onReply, onFail, selector, SEND_TIMEOUT_MS, SEND_PRIORITY);
_context.jobQueue().addJob(j);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Could not find any outbound tunnels to send the payload through... wtf?");
_context.jobQueue().addJob(onFail);
}
}
/**
* Pick an arbitrary outbound tunnel to send the message through, or null if
* Pick an arbitrary outbound tunnel to send the message through, or null if
* there aren't any around
*
*/
private TunnelId selectOutboundTunnel() {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = TunnelManagerFacade.getInstance().selectOutboundTunnelIds(crit);
if (tunnelIds.size() <= 0)
return null;
else
return (TunnelId)tunnelIds.get(0);
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(crit);
if (tunnelIds.size() <= 0)
return null;
else
return (TunnelId)tunnelIds.get(0);
}
/**
/**
* give up the ghost, this message just aint going through. tell the client to fuck off.
*
* this is safe to call multiple times (only tells the client once)
*/
private void dieFatal() {
if (_status.getSuccess()) return;
boolean alreadyFailed = _status.failed();
long sendTime = Clock.getInstance().now() - _status.getStart();
ClientMessage msg = _status.getMessage();
if (alreadyFailed) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("dieFatal() - already failed sending " + msg.getMessageId()+ ", no need to do it again", new Exception("Duplicate death?"));
return;
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Failed to send the message " + msg.getMessageId() + " after " + _status.getNumSent() + " sends and " + _status.getNumLookups() + " lookups (and " + sendTime + "ms)", new Exception("Message send failure"));
}
MessageHistory.getInstance().sendPayloadMessage(msg.getMessageId().getMessageId(), false, sendTime);
ClientManagerFacade.getInstance().messageDeliveryStatusUpdate(msg.getFromDestination(), msg.getMessageId(), false);
StatManager.getInstance().updateFrequency("client.sendMessageFailFrequency");
StatManager.getInstance().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime);
if (_status.getSuccess()) return;
boolean alreadyFailed = _status.failed();
long sendTime = _context.clock().now() - _status.getStart();
ClientMessage msg = _status.getMessage();
if (alreadyFailed) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("dieFatal() - already failed sending " + msg.getMessageId()+ ", no need to do it again", new Exception("Duplicate death?"));
return;
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Failed to send the message " + msg.getMessageId() + " after " + _status.getNumSent() + " sends and " + _status.getNumLookups() + " lookups (and " + sendTime + "ms)", new Exception("Message send failure"));
}
_context.messageHistory().sendPayloadMessage(msg.getMessageId().getMessageId(), false, sendTime);
_context.clientManager().messageDeliveryStatusUpdate(msg.getFromDestination(), msg.getMessageId(), false);
_context.statManager().updateFrequency("client.sendMessageFailFrequency");
_context.statManager().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime);
}
/** build the payload clove that will be used for all of the messages, placing the clove in the status structure */
private void buildClove() {
PayloadGarlicConfig clove = new PayloadGarlicConfig();
DeliveryInstructions instructions = new DeliveryInstructions();
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_DESTINATION);
instructions.setDestination(_status.getTo().calculateHash());
instructions.setDelayRequested(false);
instructions.setDelaySeconds(0);
instructions.setEncrypted(false);
clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
clove.setDeliveryInstructions(instructions);
clove.setExpiration(_overallExpiration);
clove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE));
DataMessage msg = new DataMessage();
msg.setData(_status.getMessage().getPayload().getEncryptedData());
clove.setPayload(msg);
clove.setRecipientPublicKey(null);
clove.setRequestAck(false);
_status.setClove(clove);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Built payload clove with id " + clove.getId());
PayloadGarlicConfig clove = new PayloadGarlicConfig();
DeliveryInstructions instructions = new DeliveryInstructions();
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_DESTINATION);
instructions.setDestination(_status.getTo().calculateHash());
instructions.setDelayRequested(false);
instructions.setDelaySeconds(0);
instructions.setEncrypted(false);
clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
clove.setDeliveryInstructions(instructions);
clove.setExpiration(_overallExpiration);
clove.setId(_context.random().nextInt(Integer.MAX_VALUE));
DataMessage msg = new DataMessage(_context);
msg.setData(_status.getMessage().getPayload().getEncryptedData());
clove.setPayload(msg);
clove.setRecipientPublicKey(null);
clove.setRequestAck(false);
_status.setClove(clove);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Built payload clove with id " + clove.getId());
}
/**
@ -363,162 +363,171 @@ public class OutboundClientMessageJob extends JobImpl {
*
*/
private class OutboundClientMessageStatus {
private ClientMessage _msg;
private PayloadGarlicConfig _clove;
private LeaseSet _leaseSet;
private Set _sent;
private int _numLookups;
private boolean _success;
private boolean _failure;
private long _start;
private int _previousSent;
public OutboundClientMessageStatus(ClientMessage msg) {
_msg = msg;
_clove = null;
_leaseSet = null;
_sent = new HashSet(4);
_success = false;
_failure = false;
_numLookups = 0;
_previousSent = 0;
_start = Clock.getInstance().now();
}
/** raw payload */
public Payload getPayload() { return _msg.getPayload(); }
/** clove, if we've built it */
public PayloadGarlicConfig getClove() { return _clove; }
public void setClove(PayloadGarlicConfig clove) { _clove = clove; }
public ClientMessage getMessage() { return _msg; }
/** date we started the process on */
public long getStart() { return _start; }
public int getNumLookups() { return _numLookups; }
public void incrementLookups() { _numLookups++; }
public void clearAlreadySent() {
synchronized (_sent) {
_previousSent += _sent.size();
_sent.clear();
}
}
/** who sent the message? */
public Destination getFrom() { return _msg.getFromDestination(); }
/** who is the message going to? */
public Destination getTo() { return _msg.getDestination(); }
/** what is the target's current leaseSet (or null if we don't know yet) */
public LeaseSet getLeaseSet() { return _leaseSet; }
public void setLeaseSet(LeaseSet ls) { _leaseSet = ls; }
/** have we already sent the message down this tunnel? */
public boolean alreadySent(Hash gateway, TunnelId tunnelId) {
Tunnel t = new Tunnel(gateway, tunnelId);
synchronized (_sent) {
return _sent.contains(t);
}
}
public void sent(Hash gateway, TunnelId tunnelId) {
Tunnel t = new Tunnel(gateway, tunnelId);
synchronized (_sent) {
_sent.add(t);
}
}
/** how many messages have we sent through various leases? */
public int getNumSent() {
synchronized (_sent) {
return _sent.size() + _previousSent;
}
}
/** did we totally fail? */
public boolean getFailure() { return _failure; }
/** we failed. returns true if we had already failed before */
public boolean failed() {
boolean already = _failure;
_failure = true;
return already;
}
/** have we totally succeeded? */
public boolean getSuccess() { return _success; }
/** we succeeded. returns true if we had already succeeded before */
public boolean success() {
boolean already = _success;
_success = true;
return already;
}
/** represent a unique tunnel at any given time */
private class Tunnel {
private Hash _gateway;
private TunnelId _tunnel;
public Tunnel(Hash tunnelGateway, TunnelId tunnel) {
_gateway = tunnelGateway;
_tunnel = tunnel;
}
public Hash getGateway() { return _gateway; }
public TunnelId getTunnel() { return _tunnel; }
public int hashCode() {
int rv = 0;
if (_gateway != null)
rv += _gateway.hashCode();
if (_tunnel != null)
rv += 7*_tunnel.getTunnelId();
return rv;
}
public boolean equals(Object o) {
if (o == null) return false;
if (o.getClass() != Tunnel.class) return false;
Tunnel t = (Tunnel)o;
return (getTunnel() == t.getTunnel()) &&
getGateway().equals(t.getGateway());
}
}
private ClientMessage _msg;
private PayloadGarlicConfig _clove;
private LeaseSet _leaseSet;
private Set _sent;
private int _numLookups;
private boolean _success;
private boolean _failure;
private long _start;
private int _previousSent;
public OutboundClientMessageStatus(ClientMessage msg) {
_msg = msg;
_clove = null;
_leaseSet = null;
_sent = new HashSet(4);
_success = false;
_failure = false;
_numLookups = 0;
_previousSent = 0;
_start = _context.clock().now();
}
/** raw payload */
public Payload getPayload() { return _msg.getPayload(); }
/** clove, if we've built it */
public PayloadGarlicConfig getClove() { return _clove; }
public void setClove(PayloadGarlicConfig clove) { _clove = clove; }
public ClientMessage getMessage() { return _msg; }
/** date we started the process on */
public long getStart() { return _start; }
public int getNumLookups() { return _numLookups; }
public void incrementLookups() { _numLookups++; }
public void clearAlreadySent() {
synchronized (_sent) {
_previousSent += _sent.size();
_sent.clear();
}
}
/** who sent the message? */
public Destination getFrom() { return _msg.getFromDestination(); }
/** who is the message going to? */
public Destination getTo() { return _msg.getDestination(); }
/** what is the target's current leaseSet (or null if we don't know yet) */
public LeaseSet getLeaseSet() { return _leaseSet; }
public void setLeaseSet(LeaseSet ls) { _leaseSet = ls; }
/** have we already sent the message down this tunnel? */
public boolean alreadySent(Hash gateway, TunnelId tunnelId) {
Tunnel t = new Tunnel(gateway, tunnelId);
synchronized (_sent) {
return _sent.contains(t);
}
}
public void sent(Hash gateway, TunnelId tunnelId) {
Tunnel t = new Tunnel(gateway, tunnelId);
synchronized (_sent) {
_sent.add(t);
}
}
/** how many messages have we sent through various leases? */
public int getNumSent() {
synchronized (_sent) {
return _sent.size() + _previousSent;
}
}
/** did we totally fail? */
public boolean getFailure() { return _failure; }
/** we failed. returns true if we had already failed before */
public boolean failed() {
boolean already = _failure;
_failure = true;
return already;
}
/** have we totally succeeded? */
public boolean getSuccess() { return _success; }
/** we succeeded. returns true if we had already succeeded before */
public boolean success() {
boolean already = _success;
_success = true;
return already;
}
/** represent a unique tunnel at any given time */
private class Tunnel {
private Hash _gateway;
private TunnelId _tunnel;
public Tunnel(Hash tunnelGateway, TunnelId tunnel) {
_gateway = tunnelGateway;
_tunnel = tunnel;
}
public Hash getGateway() { return _gateway; }
public TunnelId getTunnel() { return _tunnel; }
public int hashCode() {
int rv = 0;
if (_gateway != null)
rv += _gateway.hashCode();
if (_tunnel != null)
rv += 7*_tunnel.getTunnelId();
return rv;
}
public boolean equals(Object o) {
if (o == null) return false;
if (o.getClass() != Tunnel.class) return false;
Tunnel t = (Tunnel)o;
return (getTunnel() == t.getTunnel()) &&
getGateway().equals(t.getGateway());
}
}
}
/**
* Keep an eye out for any of the delivery status message tokens that have been
* sent down the various tunnels to deliver this message
*
*/
private class ReplySelector implements MessageSelector {
private long _pendingToken;
public ReplySelector(long token) {
_pendingToken = token;
}
public boolean continueMatching() { return false; }
public long getExpiration() { return _overallExpiration; }
public boolean isMatch(I2NPMessage inMsg) {
if (inMsg.getType() == DeliveryStatusMessage.MESSAGE_TYPE) {
return _pendingToken == ((DeliveryStatusMessage)inMsg).getMessageId();
} else {
return false;
}
}
private long _pendingToken;
public ReplySelector(long token) {
_pendingToken = token;
}
public boolean continueMatching() { return false; }
public long getExpiration() { return _overallExpiration; }
public boolean isMatch(I2NPMessage inMsg) {
if (inMsg.getType() == DeliveryStatusMessage.MESSAGE_TYPE) {
return _pendingToken == ((DeliveryStatusMessage)inMsg).getMessageId();
} else {
return false;
}
}
}
/** queued by the db lookup success and the send timeout to get us to try the next lease */
private class NextStepJob extends JobImpl {
public String getName() { return "Process next step for outbound client message"; }
public void runJob() { sendNext(); }
public NextStepJob() {
super(OutboundClientMessageJob.this._context);
}
public String getName() { return "Process next step for outbound client message"; }
public void runJob() { sendNext(); }
}
/** we couldn't even find the leaseSet, fuck off */
private class LookupLeaseSetFailedJob extends JobImpl {
public String getName() { return "Lookup for outbound client message failed"; }
public void runJob() { dieFatal(); }
public LookupLeaseSetFailedJob() {
super(OutboundClientMessageJob.this._context);
}
public String getName() { return "Lookup for outbound client message failed"; }
public void runJob() { dieFatal(); }
}
/** send a message to a lease */
private class SendJob extends JobImpl {
private Lease _lease;
public SendJob(Lease lease) { _lease = lease; }
public String getName() { return "Send outbound client message through the lease"; }
public void runJob() { send(_lease); }
private Lease _lease;
public SendJob(Lease lease) {
super(OutboundClientMessageJob.this._context);
_lease = lease;
}
public String getName() { return "Send outbound client message through the lease"; }
public void runJob() { send(_lease); }
}
/**
@ -527,48 +536,49 @@ public class OutboundClientMessageJob extends JobImpl {
*
*/
private class SendSuccessJob extends JobImpl implements ReplyJob {
private Lease _lease;
private SessionKey _key;
private Set _tags;
/**
* Create a new success job that will be fired when the message encrypted with
* the given session key and bearing the specified tags are confirmed delivered.
*
*/
public SendSuccessJob(Lease lease, SessionKey key, Set tags) {
_lease = lease;
_key = key;
_tags = tags;
}
public String getName() { return "Send client message successful to a lease"; }
public void runJob() {
long sendTime = Clock.getInstance().now() - _status.getStart();
boolean alreadySuccessful = _status.success();
MessageId msgId = _status.getMessage().getMessageId();
if (_log.shouldLog(Log.DEBUG))
_log.debug("SUCCESS! Message delivered completely for message " + msgId + " after " + sendTime + "ms [for " + _status.getMessage().getMessageId() + "]");
if ( (_key != null) && (_tags != null) && (_tags.size() > 0) ) {
SessionKeyManager.getInstance().tagsDelivered(_status.getLeaseSet().getEncryptionKey(), _key, _tags);
}
if (alreadySuccessful) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Success is a duplicate for " + _status.getMessage().getMessageId() + ", dont notify again...");
return;
}
long dataMsgId = _status.getClove().getId();
MessageHistory.getInstance().sendPayloadMessage(dataMsgId, true, sendTime);
ClientManagerFacade.getInstance().messageDeliveryStatusUpdate(_status.getFrom(), msgId, true);
_lease.setNumSuccess(_lease.getNumSuccess()+1);
StatManager.getInstance().addRateData("client.sendMessageSize", _status.getMessage().getPayload().getSize(), sendTime);
StatManager.getInstance().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime);
}
public void setMessage(I2NPMessage msg) {}
private Lease _lease;
private SessionKey _key;
private Set _tags;
/**
* Create a new success job that will be fired when the message encrypted with
* the given session key and bearing the specified tags are confirmed delivered.
*
*/
public SendSuccessJob(Lease lease, SessionKey key, Set tags) {
super(OutboundClientMessageJob.this._context);
_lease = lease;
_key = key;
_tags = tags;
}
public String getName() { return "Send client message successful to a lease"; }
public void runJob() {
long sendTime = _context.clock().now() - _status.getStart();
boolean alreadySuccessful = _status.success();
MessageId msgId = _status.getMessage().getMessageId();
if (_log.shouldLog(Log.DEBUG))
_log.debug("SUCCESS! Message delivered completely for message " + msgId + " after " + sendTime + "ms [for " + _status.getMessage().getMessageId() + "]");
if ( (_key != null) && (_tags != null) && (_tags.size() > 0) ) {
SendSuccessJob.this._context.sessionKeyManager().tagsDelivered(_status.getLeaseSet().getEncryptionKey(), _key, _tags);
}
if (alreadySuccessful) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Success is a duplicate for " + _status.getMessage().getMessageId() + ", dont notify again...");
return;
}
long dataMsgId = _status.getClove().getId();
SendSuccessJob.this._context.messageHistory().sendPayloadMessage(dataMsgId, true, sendTime);
SendSuccessJob.this._context.clientManager().messageDeliveryStatusUpdate(_status.getFrom(), msgId, true);
_lease.setNumSuccess(_lease.getNumSuccess()+1);
SendSuccessJob.this._context.statManager().addRateData("client.sendMessageSize", _status.getMessage().getPayload().getSize(), sendTime);
SendSuccessJob.this._context.statManager().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime);
}
public void setMessage(I2NPMessage msg) {}
}
/**
@ -577,18 +587,19 @@ public class OutboundClientMessageJob extends JobImpl {
*
*/
private class SendTimeoutJob extends JobImpl {
private Lease _lease;
public SendTimeoutJob(Lease lease) {
_lease = lease;
}
public String getName() { return "Send client message timed out through a lease"; }
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Soft timeout through the lease " + _lease);
_lease.setNumFailure(_lease.getNumFailure()+1);
sendNext();
}
private Lease _lease;
public SendTimeoutJob(Lease lease) {
super(OutboundClientMessageJob.this._context);
_lease = lease;
}
public String getName() { return "Send client message timed out through a lease"; }
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Soft timeout through the lease " + _lease);
_lease.setNumFailure(_lease.getNumFailure()+1);
sendNext();
}
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -30,17 +30,16 @@ import net.i2p.router.TunnelSelectionCriteria;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.router.RouterContext;
/**
* Handle a particular client message that is destined for a remote destination.
*
*/
class OutboundClientMessageJobHelper {
private static Log _log = new Log(OutboundClientMessageJobHelper.class);
/**
* Build a garlic message that will be delivered to the router on which the target is located.
* Inside the message are two cloves: one containing the payload with instructions for
* Inside the message are two cloves: one containing the payload with instructions for
* delivery to the (now local) destination, and the other containing a DeliveryStatusMessage with
* instructions for delivery to an inbound tunnel of this router.
*
@ -52,128 +51,130 @@ class OutboundClientMessageJobHelper {
* For now, its just a tunneled DeliveryStatusMessage
*
*/
static GarlicMessage createGarlicMessage(long replyToken, long expiration, PublicKey recipientPK, Payload data, Destination dest, SessionKey wrappedKey, Set wrappedTags, boolean requireAck) {
PayloadGarlicConfig dataClove = buildDataClove(data, dest, expiration);
return createGarlicMessage(replyToken, expiration, recipientPK, dataClove, dest, wrappedKey, wrappedTags, requireAck);
static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK, Payload data, Destination dest, SessionKey wrappedKey, Set wrappedTags, boolean requireAck) {
PayloadGarlicConfig dataClove = buildDataClove(ctx, data, dest, expiration);
return createGarlicMessage(ctx, replyToken, expiration, recipientPK, dataClove, dest, wrappedKey, wrappedTags, requireAck);
}
/**
* Allow the app to specify the data clove directly, which enables OutboundClientMessage to resend the
* same payload (including expiration and unique id) in different garlics (down different tunnels)
*
*/
static GarlicMessage createGarlicMessage(long replyToken, long expiration, PublicKey recipientPK, PayloadGarlicConfig dataClove, Destination dest, SessionKey wrappedKey, Set wrappedTags, boolean requireAck) {
GarlicConfig config = createGarlicConfig(replyToken, expiration, recipientPK, dataClove, dest, requireAck);
GarlicMessage msg = GarlicMessageBuilder.buildMessage(config, wrappedKey, wrappedTags);
return msg;
static GarlicMessage createGarlicMessage(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK, PayloadGarlicConfig dataClove, Destination dest, SessionKey wrappedKey, Set wrappedTags, boolean requireAck) {
GarlicConfig config = createGarlicConfig(ctx, replyToken, expiration, recipientPK, dataClove, dest, requireAck);
GarlicMessage msg = GarlicMessageBuilder.buildMessage(ctx, config, wrappedKey, wrappedTags);
return msg;
}
private static GarlicConfig createGarlicConfig(long replyToken, long expiration, PublicKey recipientPK, PayloadGarlicConfig dataClove, Destination dest, boolean requireAck) {
_log.debug("Reply token: " + replyToken);
GarlicConfig config = new GarlicConfig();
config.addClove(dataClove);
if (requireAck) {
PayloadGarlicConfig ackClove = buildAckClove(replyToken, expiration);
config.addClove(ackClove);
}
DeliveryInstructions instructions = new DeliveryInstructions();
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
instructions.setDelayRequested(false);
instructions.setDelaySeconds(0);
instructions.setEncrypted(false);
instructions.setEncryptionKey(null);
instructions.setRouter(null);
instructions.setTunnelId(null);
config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
config.setDeliveryInstructions(instructions);
config.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE));
config.setExpiration(expiration+2*Router.CLOCK_FUDGE_FACTOR);
config.setRecipientPublicKey(recipientPK);
config.setRequestAck(false);
_log.info("Creating garlic config to be encrypted to " + recipientPK + " for destination " + dest.calculateHash().toBase64());
return config;
private static GarlicConfig createGarlicConfig(RouterContext ctx, long replyToken, long expiration, PublicKey recipientPK, PayloadGarlicConfig dataClove, Destination dest, boolean requireAck) {
Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class);
log.debug("Reply token: " + replyToken);
GarlicConfig config = new GarlicConfig();
config.addClove(dataClove);
if (requireAck) {
PayloadGarlicConfig ackClove = buildAckClove(ctx, replyToken, expiration);
config.addClove(ackClove);
}
DeliveryInstructions instructions = new DeliveryInstructions();
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
instructions.setDelayRequested(false);
instructions.setDelaySeconds(0);
instructions.setEncrypted(false);
instructions.setEncryptionKey(null);
instructions.setRouter(null);
instructions.setTunnelId(null);
config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
config.setDeliveryInstructions(instructions);
config.setId(ctx.random().nextInt(Integer.MAX_VALUE));
config.setExpiration(expiration+2*Router.CLOCK_FUDGE_FACTOR);
config.setRecipientPublicKey(recipientPK);
config.setRequestAck(false);
log.info("Creating garlic config to be encrypted to " + recipientPK + " for destination " + dest.calculateHash().toBase64());
return config;
}
/**
* Build a clove that sends a DeliveryStatusMessage to us
*/
private static PayloadGarlicConfig buildAckClove(long replyToken, long expiration) {
PayloadGarlicConfig ackClove = new PayloadGarlicConfig();
Hash replyToTunnelRouter = null; // inbound tunnel gateway
TunnelId replyToTunnelId = null; // tunnel id on that gateway
TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
criteria.setMaximumTunnelsRequired(1);
criteria.setMinimumTunnelsRequired(1);
criteria.setReliabilityPriority(50); // arbitrary. fixme
criteria.setAnonymityPriority(50); // arbitrary. fixme
criteria.setLatencyPriority(50); // arbitrary. fixme
List tunnelIds = TunnelManagerFacade.getInstance().selectInboundTunnelIds(criteria);
if (tunnelIds.size() <= 0) {
_log.error("No inbound tunnels to receive an ack through!?");
return null;
}
replyToTunnelId = (TunnelId)tunnelIds.get(0);
TunnelInfo info = TunnelManagerFacade.getInstance().getTunnelInfo(replyToTunnelId);
replyToTunnelRouter = info.getThisHop(); // info is the chain, and the first hop is the gateway
_log.debug("Ack for the data message will come back along tunnel " + replyToTunnelId + ":\n" + info);
DeliveryInstructions ackInstructions = new DeliveryInstructions();
ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL);
ackInstructions.setRouter(replyToTunnelRouter);
ackInstructions.setTunnelId(replyToTunnelId);
ackInstructions.setDelayRequested(false);
ackInstructions.setDelaySeconds(0);
ackInstructions.setEncrypted(false);
DeliveryStatusMessage msg = new DeliveryStatusMessage();
msg.setArrival(new Date(Clock.getInstance().now()));
msg.setMessageId(replyToken);
_log.debug("Delivery status message key: " + replyToken + " arrival: " + msg.getArrival());
ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
ackClove.setDeliveryInstructions(ackInstructions);
ackClove.setExpiration(expiration);
ackClove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE));
ackClove.setPayload(msg);
ackClove.setRecipient(Router.getInstance().getRouterInfo());
ackClove.setRequestAck(false);
_log.debug("Delivery status message is targetting us [" + ackClove.getRecipient().getIdentity().getHash().toBase64() + "] via tunnel " + replyToTunnelId.getTunnelId() + " on " + replyToTunnelRouter.toBase64());
return ackClove;
}
private static PayloadGarlicConfig buildAckClove(RouterContext ctx, long replyToken, long expiration) {
Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class);
PayloadGarlicConfig ackClove = new PayloadGarlicConfig();
Hash replyToTunnelRouter = null; // inbound tunnel gateway
TunnelId replyToTunnelId = null; // tunnel id on that gateway
TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
criteria.setMaximumTunnelsRequired(1);
criteria.setMinimumTunnelsRequired(1);
criteria.setReliabilityPriority(50); // arbitrary. fixme
criteria.setAnonymityPriority(50); // arbitrary. fixme
criteria.setLatencyPriority(50); // arbitrary. fixme
List tunnelIds = ctx.tunnelManager().selectInboundTunnelIds(criteria);
if (tunnelIds.size() <= 0) {
log.error("No inbound tunnels to receive an ack through!?");
return null;
}
replyToTunnelId = (TunnelId)tunnelIds.get(0);
TunnelInfo info = ctx.tunnelManager().getTunnelInfo(replyToTunnelId);
replyToTunnelRouter = info.getThisHop(); // info is the chain, and the first hop is the gateway
log.debug("Ack for the data message will come back along tunnel " + replyToTunnelId + ":\n" + info);
DeliveryInstructions ackInstructions = new DeliveryInstructions();
ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL);
ackInstructions.setRouter(replyToTunnelRouter);
ackInstructions.setTunnelId(replyToTunnelId);
ackInstructions.setDelayRequested(false);
ackInstructions.setDelaySeconds(0);
ackInstructions.setEncrypted(false);
DeliveryStatusMessage msg = new DeliveryStatusMessage(ctx);
msg.setArrival(new Date(ctx.clock().now()));
msg.setMessageId(replyToken);
log.debug("Delivery status message key: " + replyToken + " arrival: " + msg.getArrival());
ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
ackClove.setDeliveryInstructions(ackInstructions);
ackClove.setExpiration(expiration);
ackClove.setId(ctx.random().nextInt(Integer.MAX_VALUE));
ackClove.setPayload(msg);
ackClove.setRecipient(ctx.router().getRouterInfo());
ackClove.setRequestAck(false);
log.debug("Delivery status message is targetting us [" + ackClove.getRecipient().getIdentity().getHash().toBase64() + "] via tunnel " + replyToTunnelId.getTunnelId() + " on " + replyToTunnelRouter.toBase64());
return ackClove;
}
/**
* Build a clove that sends the payload to the destination
*/
static PayloadGarlicConfig buildDataClove(Payload data, Destination dest, long expiration) {
PayloadGarlicConfig clove = new PayloadGarlicConfig();
DeliveryInstructions instructions = new DeliveryInstructions();
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_DESTINATION);
instructions.setDestination(dest.calculateHash());
instructions.setDelayRequested(false);
instructions.setDelaySeconds(0);
instructions.setEncrypted(false);
clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
clove.setDeliveryInstructions(instructions);
clove.setExpiration(expiration);
clove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE));
DataMessage msg = new DataMessage();
msg.setData(data.getEncryptedData());
clove.setPayload(msg);
clove.setRecipientPublicKey(null);
clove.setRequestAck(false);
return clove;
static PayloadGarlicConfig buildDataClove(RouterContext ctx, Payload data, Destination dest, long expiration) {
PayloadGarlicConfig clove = new PayloadGarlicConfig();
DeliveryInstructions instructions = new DeliveryInstructions();
instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_DESTINATION);
instructions.setDestination(dest.calculateHash());
instructions.setDelayRequested(false);
instructions.setDelaySeconds(0);
instructions.setEncrypted(false);
clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
clove.setDeliveryInstructions(instructions);
clove.setExpiration(expiration);
clove.setId(ctx.random().nextInt(Integer.MAX_VALUE));
DataMessage msg = new DataMessage(ctx);
msg.setData(data.getEncryptedData());
clove.setPayload(msg);
clove.setRecipientPublicKey(null);
clove.setRequestAck(false);
return clove;
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -23,13 +23,14 @@ import net.i2p.router.ReplyJob;
import net.i2p.router.Router;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Build a garlic message from config, encrypt it, and enqueue it for delivery.
*
*/
public class SendGarlicJob extends JobImpl {
private final static Log _log = new Log(SendGarlicJob.class);
private Log _log;
//private RouterInfo _target;
private GarlicConfig _config;
private Job _onSend;
@ -42,7 +43,7 @@ public class SendGarlicJob extends JobImpl {
private GarlicMessage _message;
private SessionKey _wrappedKey;
private Set _wrappedTags;
/**
*
* @param config ???
@ -54,69 +55,73 @@ public class SendGarlicJob extends JobImpl {
* @param priority how high priority to send this test
* @param replySelector ???
*/
public SendGarlicJob(GarlicConfig config, Job onSend, Job onSendFailed, ReplyJob onReply, Job onReplyFailed, long timeoutMs, int priority, MessageSelector replySelector) {
this(config, onSend, onSendFailed, onReply, onReplyFailed, timeoutMs, priority, replySelector, new SessionKey(), new HashSet());
public SendGarlicJob(RouterContext ctx, GarlicConfig config, Job onSend, Job onSendFailed, ReplyJob onReply, Job onReplyFailed, long timeoutMs, int priority, MessageSelector replySelector) {
this(ctx, config, onSend, onSendFailed, onReply, onReplyFailed, timeoutMs, priority, replySelector, new SessionKey(), new HashSet());
}
public SendGarlicJob(GarlicConfig config, Job onSend, Job onSendFailed, ReplyJob onReply, Job onReplyFailed, long timeoutMs, int priority, MessageSelector replySelector, SessionKey wrappedKey, Set wrappedTags) {
super();
if (config == null) throw new IllegalArgumentException("No config specified");
if (config.getRecipient() == null) throw new IllegalArgumentException("No recipient in the config");
//_target = target;
_config = config;
_onSend = onSend;
_onSendFailed = onSendFailed;
_onReply = onReply;
_onReplyFailed = onReplyFailed;
_timeoutMs = timeoutMs;
_priority = priority;
_replySelector = replySelector;
_message = null;
_wrappedKey = wrappedKey;
_wrappedTags = wrappedTags;
public SendGarlicJob(RouterContext ctx, GarlicConfig config, Job onSend, Job onSendFailed, ReplyJob onReply, Job onReplyFailed, long timeoutMs, int priority, MessageSelector replySelector, SessionKey wrappedKey, Set wrappedTags) {
super(ctx);
_log = ctx.logManager().getLog(SendGarlicJob.class);
if (config == null) throw new IllegalArgumentException("No config specified");
if (config.getRecipient() == null) throw new IllegalArgumentException("No recipient in the config");
//_target = target;
_config = config;
_onSend = onSend;
_onSendFailed = onSendFailed;
_onReply = onReply;
_onReplyFailed = onReplyFailed;
_timeoutMs = timeoutMs;
_priority = priority;
_replySelector = replySelector;
_message = null;
_wrappedKey = wrappedKey;
_wrappedTags = wrappedTags;
}
public String getName() { return "Build Garlic Message"; }
public void runJob() {
long before = Clock.getInstance().now();
_message = GarlicMessageBuilder.buildMessage(_config, _wrappedKey, _wrappedTags);
long after = Clock.getInstance().now();
if ( (after - before) > 1000) {
_log.warn("Building the garlic took too long [" + (after-before)+" ms]", getAddedBy());
} else {
_log.debug("Building the garlic was fast! " + (after - before) + " ms");
}
JobQueue.getInstance().addJob(new SendJob());
long before = _context.clock().now();
_message = GarlicMessageBuilder.buildMessage(_context, _config, _wrappedKey, _wrappedTags);
long after = _context.clock().now();
if ( (after - before) > 1000) {
_log.warn("Building the garlic took too long [" + (after-before)+" ms]", getAddedBy());
} else {
_log.debug("Building the garlic was fast! " + (after - before) + " ms");
}
_context.jobQueue().addJob(new SendJob());
}
private class SendJob extends JobImpl {
public String getName() { return "Send Built Garlic Message"; }
public void runJob() {
if (_config.getRecipient() != null)
_log.info("sending garlic to recipient " + _config.getRecipient().getIdentity().getHash().toBase64());
else
_log.info("sending garlic to public key " + _config.getRecipientPublicKey());
sendGarlic();
}
public SendJob() {
super(SendGarlicJob.this._context);
}
public String getName() { return "Send Built Garlic Message"; }
public void runJob() {
if (_config.getRecipient() != null)
_log.info("sending garlic to recipient " + _config.getRecipient().getIdentity().getHash().toBase64());
else
_log.info("sending garlic to public key " + _config.getRecipientPublicKey());
sendGarlic();
}
}
private void sendGarlic() {
OutNetMessage msg = new OutNetMessage();
long when = _message.getMessageExpiration().getTime() + Router.CLOCK_FUDGE_FACTOR;
msg.setExpiration(when);
msg.setMessage(_message);
msg.setOnFailedReplyJob(_onReplyFailed);
msg.setOnFailedSendJob(_onSendFailed);
msg.setOnReplyJob(_onReply);
msg.setOnSendJob(_onSend);
msg.setPriority(_priority);
msg.setReplySelector(_replySelector);
msg.setTarget(_config.getRecipient());
//_log.info("Sending garlic message to [" + _config.getRecipient() + "] encrypted with " + _config.getRecipientPublicKey() + " or " + _config.getRecipient().getIdentity().getPublicKey());
//_log.debug("Garlic config data:\n" + _config);
//msg.setTarget(_target);
OutNetMessagePool.getInstance().add(msg);
_log.debug("Garlic message added to outbound network message pool");
OutNetMessage msg = new OutNetMessage(_context);
long when = _message.getMessageExpiration().getTime() + Router.CLOCK_FUDGE_FACTOR;
msg.setExpiration(when);
msg.setMessage(_message);
msg.setOnFailedReplyJob(_onReplyFailed);
msg.setOnFailedSendJob(_onSendFailed);
msg.setOnReplyJob(_onReply);
msg.setOnSendJob(_onSend);
msg.setPriority(_priority);
msg.setReplySelector(_replySelector);
msg.setTarget(_config.getRecipient());
//_log.info("Sending garlic message to [" + _config.getRecipient() + "] encrypted with " + _config.getRecipientPublicKey() + " or " + _config.getRecipient().getIdentity().getPublicKey());
//_log.debug("Garlic config data:\n" + _config);
//msg.setTarget(_target);
_context.outNetMessagePool().add(msg);
_log.debug("Garlic message added to outbound network message pool");
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -16,6 +16,7 @@ import net.i2p.data.i2np.SourceRouteBlock;
import net.i2p.router.JobImpl;
import net.i2p.router.JobQueue;
import net.i2p.util.Clock;
import net.i2p.router.RouterContext;
/**
* Send a DeliveryStatusMessage to the location specified in the source route block
@ -30,29 +31,29 @@ public class SendMessageAckJob extends JobImpl {
public final static int ACK_PRIORITY = 100;
public SendMessageAckJob(SourceRouteBlock block, long ackId) {
super();
_block = block;
_ackId = ackId;
public SendMessageAckJob(RouterContext ctx, SourceRouteBlock block, long ackId) {
super(ctx);
_block = block;
_ackId = ackId;
}
public void runJob() {
JobQueue.getInstance().addJob(new SendReplyMessageJob(_block, createAckMessage(), ACK_PRIORITY));
_context.jobQueue().addJob(new SendReplyMessageJob(_context, _block, createAckMessage(), ACK_PRIORITY));
}
/**
* Create whatever should be delivered to the intermediary hop so that
* a DeliveryStatusMessage gets to the intended recipient.
* Create whatever should be delivered to the intermediary hop so that
* a DeliveryStatusMessage gets to the intended recipient.
*
* Currently this doesn't garlic encrypt the DeliveryStatusMessage with
* the block's tag and sessionKey, but it could.
*
*/
protected I2NPMessage createAckMessage() {
DeliveryStatusMessage statusMessage = new DeliveryStatusMessage();
statusMessage.setArrival(new Date(Clock.getInstance().now()));
statusMessage.setMessageId(_ackId);
return statusMessage;
DeliveryStatusMessage statusMessage = new DeliveryStatusMessage(_context);
statusMessage.setArrival(new Date(_context.clock().now()));
statusMessage.setMessageId(_ackId);
return statusMessage;
}
public String getName() { return "Send Message Ack"; }

View File

@ -27,9 +27,10 @@ import net.i2p.router.Router;
import net.i2p.router.transport.OutboundMessageRegistry;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
public class SendMessageDirectJob extends JobImpl {
private final static Log _log = new Log(SendMessageDirectJob.class);
private Log _log;
private I2NPMessage _message;
private Hash _targetHash;
private RouterInfo _router;
@ -44,114 +45,135 @@ public class SendMessageDirectJob extends JobImpl {
private final static long DEFAULT_TIMEOUT = 60*1000;
public SendMessageDirectJob(I2NPMessage message, Hash toPeer, long expiration, int priority) {
this(message, toPeer, null, null, null, null, expiration, priority);
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, long expiration, int priority) {
this(ctx, message, toPeer, null, null, null, null, expiration, priority);
}
public SendMessageDirectJob(I2NPMessage message, Hash toPeer, int priority) {
this(message, toPeer, DEFAULT_TIMEOUT+Clock.getInstance().now(), priority);
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, int priority) {
this(ctx, message, toPeer, DEFAULT_TIMEOUT+ctx.clock().now(), priority);
}
public SendMessageDirectJob(I2NPMessage message, Hash toPeer, ReplyJob onSuccess, Job onFail, MessageSelector selector, long expiration, int priority) {
this(message, toPeer, null, onSuccess, onFail, selector, expiration, priority);
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, ReplyJob onSuccess, Job onFail, MessageSelector selector, long expiration, int priority) {
this(ctx, message, toPeer, null, onSuccess, onFail, selector, expiration, priority);
}
public SendMessageDirectJob(I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess, Job onFail, MessageSelector selector, long expiration, int priority) {
super();
_message = message;
_targetHash = toPeer;
_router = null;
_expiration = expiration;
_priority = priority;
_alreadySearched = false;
_onSend = onSend;
_onSuccess = onSuccess;
_onFail = onFail;
_selector = selector;
if (message == null)
throw new IllegalArgumentException("Attempt to send a null message");
if (_targetHash == null)
throw new IllegalArgumentException("Attempt to send a message to a null peer");
_sent = false;
long remaining = expiration - Clock.getInstance().now();
if (remaining < 50*1000) {
_log.info("Sending message to expire in " + remaining + "ms containing " + message.getUniqueId() + " (a " + message.getClass().getName() + ")", new Exception("SendDirect from"));
}
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess, Job onFail, MessageSelector selector, long expiration, int priority) {
super(ctx);
_log = _context.logManager().getLog(SendMessageDirectJob.class);
_message = message;
_targetHash = toPeer;
_router = null;
_expiration = expiration;
_priority = priority;
_alreadySearched = false;
_onSend = onSend;
_onSuccess = onSuccess;
_onFail = onFail;
_selector = selector;
if (message == null)
throw new IllegalArgumentException("Attempt to send a null message");
if (_targetHash == null)
throw new IllegalArgumentException("Attempt to send a message to a null peer");
_sent = false;
long remaining = expiration - _context.clock().now();
if (remaining < 50*1000) {
_log.info("Sending message to expire in " + remaining + "ms containing " + message.getUniqueId() + " (a " + message.getClass().getName() + ")", new Exception("SendDirect from"));
}
}
public String getName() { return "Send Message Direct"; }
public void runJob() {
long now = Clock.getInstance().now();
if (_expiration == 0)
_expiration = now + DEFAULT_TIMEOUT;
if (_expiration - 30*1000 < now) {
_log.info("Soon to expire sendDirect of " + _message.getClass().getName() + " [expiring in " + (_expiration-now) + "]", getAddedBy());
}
if (_expiration < now) {
_log.warn("Timed out sending message " + _message + " directly (expiration = " + new Date(_expiration) + ") to " + _targetHash.toBase64(), getAddedBy());
return;
}
if (_router != null) {
_log.debug("Router specified, sending");
send();
} else {
_router = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(_targetHash);
if (_router != null) {
_log.debug("Router not specified but lookup found it");
send();
} else {
if (!_alreadySearched) {
_log.debug("Router not specified, so we're looking for it...");
NetworkDatabaseFacade.getInstance().lookupRouterInfo(_targetHash, this, this, _expiration - Clock.getInstance().now());
_alreadySearched = true;
} else {
_log.error("Unable to find the router to send to: " + _targetHash + " message: " + _message, getAddedBy());
}
}
}
long now = _context.clock().now();
if (_expiration == 0)
_expiration = now + DEFAULT_TIMEOUT;
if (_expiration - 30*1000 < now) {
_log.info("Soon to expire sendDirect of " + _message.getClass().getName()
+ " [expiring in " + (_expiration-now) + "]", getAddedBy());
}
if (_expiration < now) {
if (_log.shouldLog(Log.WARN))
_log.warn("Timed out sending message " + _message + " directly (expiration = "
+ new Date(_expiration) + ") to " + _targetHash.toBase64(), getAddedBy());
return;
}
if (_router != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Router specified, sending");
send();
} else {
_router = _context.netDb().lookupRouterInfoLocally(_targetHash);
if (_router != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Router not specified but lookup found it");
send();
} else {
if (!_alreadySearched) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Router not specified, so we're looking for it...");
_context.netDb().lookupRouterInfo(_targetHash, this, this,
_expiration - _context.clock().now());
_alreadySearched = true;
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Unable to find the router to send to: " + _targetHash
+ " message: " + _message, getAddedBy());
}
}
}
}
private void send() {
if (_sent) { _log.warn("Not resending!", new Exception("blah")); return; }
_sent = true;
if (Router.getInstance().getRouterInfo().getIdentity().getHash().equals(_router.getIdentity().getHash())) {
if (_selector != null) {
OutNetMessage outM = new OutNetMessage();
outM.setExpiration(_expiration);
outM.setMessage(_message);
outM.setOnFailedReplyJob(_onFail);
outM.setOnFailedSendJob(_onFail);
outM.setOnReplyJob(_onSuccess);
outM.setOnSendJob(_onSend);
outM.setPriority(_priority);
outM.setReplySelector(_selector);
outM.setTarget(_router);
OutboundMessageRegistry.getInstance().registerPending(outM);
}
if (_onSend != null)
JobQueue.getInstance().addJob(_onSend);
InNetMessage msg = new InNetMessage();
msg.setFromRouter(_router.getIdentity());
msg.setMessage(_message);
InNetMessagePool.getInstance().add(msg);
_log.debug("Adding " + _message.getClass().getName() + " to inbound message pool as it was destined for ourselves");
//_log.debug("debug", _createdBy);
} else {
OutNetMessage msg = new OutNetMessage();
msg.setExpiration(_expiration);
msg.setMessage(_message);
msg.setOnFailedReplyJob(_onFail);
msg.setOnFailedSendJob(_onFail);
msg.setOnReplyJob(_onSuccess);
msg.setOnSendJob(_onSend);
msg.setPriority(_priority);
msg.setReplySelector(_selector);
msg.setTarget(_router);
OutNetMessagePool.getInstance().add(msg);
_log.debug("Adding " + _message.getClass().getName() + " to outbound message pool targeting " + _router.getIdentity().getHash().toBase64());
//_log.debug("Message pooled: " + _message);
}
if (_sent) {
if (_log.shouldLog(Log.WARN))
_log.warn("Not resending!", new Exception("blah"));
return;
}
_sent = true;
Hash to = _router.getIdentity().getHash();
Hash us = _context.router().getRouterInfo().getIdentity().getHash();
if (us.equals(to)) {
if (_selector != null) {
OutNetMessage outM = new OutNetMessage(_context);
outM.setExpiration(_expiration);
outM.setMessage(_message);
outM.setOnFailedReplyJob(_onFail);
outM.setOnFailedSendJob(_onFail);
outM.setOnReplyJob(_onSuccess);
outM.setOnSendJob(_onSend);
outM.setPriority(_priority);
outM.setReplySelector(_selector);
outM.setTarget(_router);
_context.messageRegistry().registerPending(outM);
}
if (_onSend != null)
_context.jobQueue().addJob(_onSend);
InNetMessage msg = new InNetMessage();
msg.setFromRouter(_router.getIdentity());
msg.setMessage(_message);
_context.inNetMessagePool().add(msg);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Adding " + _message.getClass().getName()
+ " to inbound message pool as it was destined for ourselves");
//_log.debug("debug", _createdBy);
} else {
OutNetMessage msg = new OutNetMessage(_context);
msg.setExpiration(_expiration);
msg.setMessage(_message);
msg.setOnFailedReplyJob(_onFail);
msg.setOnFailedSendJob(_onFail);
msg.setOnReplyJob(_onSuccess);
msg.setOnSendJob(_onSend);
msg.setPriority(_priority);
msg.setReplySelector(_selector);
msg.setTarget(_router);
_context.outNetMessagePool().add(msg);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Adding " + _message.getClass().getName()
+ " to outbound message pool targeting "
+ _router.getIdentity().getHash().toBase64());
//_log.debug("Message pooled: " + _message);
}
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -14,34 +14,36 @@ import net.i2p.data.i2np.SourceRouteReplyMessage;
import net.i2p.router.JobImpl;
import net.i2p.router.JobQueue;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Send a SourceRouteReplyMessage to the location specified in the source route block.
* This uses the simplest technique (don't garlic, and send direct to where the
* SourceRouteBlock requested), but it could instead garlic it and send it via a
* Send a SourceRouteReplyMessage to the location specified in the source route block.
* This uses the simplest technique (don't garlic, and send direct to where the
* SourceRouteBlock requested), but it could instead garlic it and send it via a
* tunnel or garlic route it additionally)
*
*/
public class SendReplyMessageJob extends JobImpl {
private final static Log _log = new Log(SendReplyMessageJob.class);
private Log _log;
private SourceRouteBlock _block;
private I2NPMessage _message;
private int _priority;
public SendReplyMessageJob(SourceRouteBlock block, I2NPMessage message, int priority) {
super();
_block = block;
_message = message;
_priority = priority;
public SendReplyMessageJob(RouterContext context, SourceRouteBlock block, I2NPMessage message, int priority) {
super(context);
_log = context.logManager().getLog(SendReplyMessageJob.class);
_block = block;
_message = message;
_priority = priority;
}
public void runJob() {
SourceRouteReplyMessage msg = new SourceRouteReplyMessage();
msg.setMessage(_message);
msg.setEncryptedHeader(_block.getData());
msg.setMessageExpiration(_message.getMessageExpiration());
send(msg);
SourceRouteReplyMessage msg = new SourceRouteReplyMessage(_context);
msg.setMessage(_message);
msg.setEncryptedHeader(_block.getData());
msg.setMessageExpiration(_message.getMessageExpiration());
send(msg);
}
/**
@ -54,9 +56,9 @@ public class SendReplyMessageJob extends JobImpl {
*
*/
protected void send(I2NPMessage msg) {
_log.info("Sending reply with " + _message.getClass().getName() + " in a sourceRouteeplyMessage to " + _block.getRouter().toBase64());
SendMessageDirectJob j = new SendMessageDirectJob(msg, _block.getRouter(), _priority);
JobQueue.getInstance().addJob(j);
_log.info("Sending reply with " + _message.getClass().getName() + " in a sourceRouteeplyMessage to " + _block.getRouter().toBase64());
SendMessageDirectJob j = new SendMessageDirectJob(_context, msg, _block.getRouter(), _priority);
_context.jobQueue().addJob(j);
}
public String getName() { return "Send Reply Message"; }

View File

@ -1,9 +1,9 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -46,13 +46,14 @@ import net.i2p.router.TunnelManagerFacade;
import net.i2p.router.transport.OutboundMessageRegistry;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Send a message down a tunnel that we are the gateway for
*
*/
public class SendTunnelMessageJob extends JobImpl {
private final static Log _log = new Log(SendTunnelMessageJob.class);
private Log _log;
private I2NPMessage _message;
private Hash _destRouter;
private TunnelId _tunnelId;
@ -65,360 +66,361 @@ public class SendTunnelMessageJob extends JobImpl {
private long _expiration;
private int _priority;
public SendTunnelMessageJob(I2NPMessage msg, TunnelId tunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) {
this(msg, tunnelId, null, null, onSend, onReply, onFailure, selector, timeoutMs, priority);
public SendTunnelMessageJob(RouterContext ctx, I2NPMessage msg, TunnelId tunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) {
this(ctx, msg, tunnelId, null, null, onSend, onReply, onFailure, selector, timeoutMs, priority);
}
public SendTunnelMessageJob(I2NPMessage msg, TunnelId tunnelId, Hash targetRouter, TunnelId targetTunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) {
super();
if (msg == null)
throw new IllegalArgumentException("wtf, null message? sod off");
_message = msg;
_destRouter = targetRouter;
_tunnelId = tunnelId;
_targetTunnelId = targetTunnelId;
_onSend = onSend;
_onReply = onReply;
_onFailure = onFailure;
_selector = selector;
_timeout = timeoutMs;
_priority = priority;
if (timeoutMs < 50*1000) {
_log.info("Sending tunnel message to expire in " + timeoutMs + "ms containing " + msg.getUniqueId() + " (a " + msg.getClass().getName() + ")", new Exception("SendTunnel from"));
}
//_log.info("Send tunnel message " + msg.getClass().getName() + " to " + _destRouter + " over " + _tunnelId + " targetting tunnel " + _targetTunnelId, new Exception("SendTunnel from"));
_expiration = Clock.getInstance().now() + timeoutMs;
public SendTunnelMessageJob(RouterContext ctx, I2NPMessage msg, TunnelId tunnelId, Hash targetRouter, TunnelId targetTunnelId, Job onSend, ReplyJob onReply, Job onFailure, MessageSelector selector, long timeoutMs, int priority) {
super(ctx);
_log = ctx.logManager().getLog(SendTunnelMessageJob.class);
if (msg == null)
throw new IllegalArgumentException("wtf, null message? sod off");
_message = msg;
_destRouter = targetRouter;
_tunnelId = tunnelId;
_targetTunnelId = targetTunnelId;
_onSend = onSend;
_onReply = onReply;
_onFailure = onFailure;
_selector = selector;
_timeout = timeoutMs;
_priority = priority;
if (timeoutMs < 50*1000) {
_log.info("Sending tunnel message to expire in " + timeoutMs + "ms containing " + msg.getUniqueId() + " (a " + msg.getClass().getName() + ")", new Exception("SendTunnel from"));
}
//_log.info("Send tunnel message " + msg.getClass().getName() + " to " + _destRouter + " over " + _tunnelId + " targetting tunnel " + _targetTunnelId, new Exception("SendTunnel from"));
_expiration = _context.clock().now() + timeoutMs;
}
public void runJob() {
TunnelInfo info = TunnelManagerFacade.getInstance().getTunnelInfo(_tunnelId);
if (info == null) {
_log.debug("Message for unknown tunnel [" + _tunnelId + "] received, forward to " + _destRouter);
if ( (_tunnelId == null) || (_destRouter == null) ) {
_log.error("Someone br0ke us. where is this message supposed to go again?", getAddedBy());
return;
}
TunnelMessage msg = new TunnelMessage();
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
_message.writeBytes(baos);
msg.setData(baos.toByteArray());
msg.setTunnelId(_tunnelId);
msg.setMessageExpiration(new Date(_expiration));
JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, _destRouter, _onSend, _onReply, _onFailure, _selector, _expiration, _priority));
String bodyType = _message.getClass().getName();
MessageHistory.getInstance().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
} catch (IOException ioe) {
_log.error("Error writing out the tunnel message to send to the tunnel", ioe);
} catch (DataFormatException dfe) {
_log.error("Error writing out the tunnel message to send to the tunnel", dfe);
}
return;
}
if (isEndpoint(info)) {
_log.info("Tunnel message where we're both the gateway and the endpoint - honor instructions");
honorInstructions(info);
return;
} else if (isGateway(info)) {
handleAsGateway(info);
return;
} else {
handleAsParticipant(info);
return;
}
TunnelInfo info = _context.tunnelManager().getTunnelInfo(_tunnelId);
if (info == null) {
_log.debug("Message for unknown tunnel [" + _tunnelId + "] received, forward to " + _destRouter);
if ( (_tunnelId == null) || (_destRouter == null) ) {
_log.error("Someone br0ke us. where is this message supposed to go again?", getAddedBy());
return;
}
TunnelMessage msg = new TunnelMessage(_context);
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
_message.writeBytes(baos);
msg.setData(baos.toByteArray());
msg.setTunnelId(_tunnelId);
msg.setMessageExpiration(new Date(_expiration));
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, _destRouter, _onSend, _onReply, _onFailure, _selector, _expiration, _priority));
String bodyType = _message.getClass().getName();
_context.messageHistory().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
} catch (IOException ioe) {
_log.error("Error writing out the tunnel message to send to the tunnel", ioe);
} catch (DataFormatException dfe) {
_log.error("Error writing out the tunnel message to send to the tunnel", dfe);
}
return;
}
if (isEndpoint(info)) {
_log.info("Tunnel message where we're both the gateway and the endpoint - honor instructions");
honorInstructions(info);
return;
} else if (isGateway(info)) {
handleAsGateway(info);
return;
} else {
handleAsParticipant(info);
return;
}
}
private void handleAsGateway(TunnelInfo info) {
// since we are the gateway, we don't need to verify the data structures
TunnelInfo us = getUs(info);
if (us == null) {
_log.error("We are not participating in this /known/ tunnel - was the router reset?");
if (_onFailure != null)
JobQueue.getInstance().addJob(_onFailure);
} else {
// we're the gateway, so sign, encrypt, and forward to info.getNextHop()
TunnelMessage msg = prepareMessage(info);
if (msg == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("wtf, unable to prepare a tunnel message to the next hop, when we're the gateway and hops remain? tunnel: " + info);
if (_onFailure != null)
JobQueue.getInstance().addJob(_onFailure);
return;
}
_log.debug("Tunnel message created: " + msg + " out of encrypted message: " + _message);
long now = Clock.getInstance().now();
if (_expiration < now + 15*1000) {
_log.warn("Adding a tunnel message that will expire shortly [" + new Date(_expiration) + "]", getAddedBy());
}
msg.setMessageExpiration(new Date(_expiration));
JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, info.getNextHop(), _onSend, _onReply, _onFailure, _selector, _expiration, _priority));
}
// since we are the gateway, we don't need to verify the data structures
TunnelInfo us = getUs(info);
if (us == null) {
_log.error("We are not participating in this /known/ tunnel - was the router reset?");
if (_onFailure != null)
_context.jobQueue().addJob(_onFailure);
} else {
// we're the gateway, so sign, encrypt, and forward to info.getNextHop()
TunnelMessage msg = prepareMessage(info);
if (msg == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("wtf, unable to prepare a tunnel message to the next hop, when we're the gateway and hops remain? tunnel: " + info);
if (_onFailure != null)
_context.jobQueue().addJob(_onFailure);
return;
}
_log.debug("Tunnel message created: " + msg + " out of encrypted message: " + _message);
long now = _context.clock().now();
if (_expiration < now + 15*1000) {
_log.warn("Adding a tunnel message that will expire shortly [" + new Date(_expiration) + "]", getAddedBy());
}
msg.setMessageExpiration(new Date(_expiration));
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, info.getNextHop(), _onSend, _onReply, _onFailure, _selector, _expiration, _priority));
}
}
/**
 * Handle the (normally illegal) case where this job fires while we are merely a
 * participant in the tunnel. The only thing we can legitimately do is re-inject an
 * already-built TunnelMessage: verify the gateway's signature and forward it to the
 * next hop. Anything else fires _onFailure.
 *
 * @param info our hop's TunnelInfo (must carry a verification key and a next hop)
 */
private void handleAsParticipant(TunnelInfo info) {
    // SendTunnelMessageJob shouldn't be used for participants!
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("SendTunnelMessageJob for a participant... ", getAddedBy());
    // participants can only pass along fully-formed tunnel messages
    if (!(_message instanceof TunnelMessage)) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("Cannot inject non-tunnel messages as a participant!" + _message, getAddedBy());
        if (_onFailure != null)
            _context.jobQueue().addJob(_onFailure);
        return;
    }
    TunnelMessage msg = (TunnelMessage)_message;
    TunnelVerificationStructure struct = msg.getVerificationStructure();
    if ( (info.getVerificationKey() == null) || (info.getVerificationKey().getKey() == null) ) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("No verification key for the participant? tunnel: " + info, getAddedBy());
        if (_onFailure != null)
            _context.jobQueue().addJob(_onFailure);
        return;
    }
    // make sure the message really was signed by the tunnel gateway
    boolean ok = struct.verifySignature(_context, info.getVerificationKey().getKey());
    if (!ok) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Failed tunnel verification!  Spoofing / tagging attack?  " + _message, getAddedBy());
        if (_onFailure != null)
            _context.jobQueue().addJob(_onFailure);
        return;
    } else {
        if (info.getNextHop() != null) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Message for tunnel " + info.getTunnelId().getTunnelId() + " received where we're not the gateway and there are remaining hops, so forward it on to "
                          + info.getNextHop().toBase64() + " via SendMessageDirectJob");
            _context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, info.getNextHop(), _onSend, null, _onFailure, null, _message.getMessageExpiration().getTime(), _priority));
            return;
        } else {
            // a participant always has a next hop; reaching here means the tunnel data is inconsistent
            if (_log.shouldLog(Log.ERROR))
                _log.error("Should not be reached - participant, but no more hops?!");
            if (_onFailure != null)
                _context.jobQueue().addJob(_onFailure);
            return;
        }
    }
}
/**
 * Find our place in the tunnel by walking the hop chain and matching each hop's
 * router hash against our own.
 *
 * @param info head of the TunnelInfo hop chain
 * @return the LAST hop entry whose hash is ours, or null if we are not in the tunnel
 */
private TunnelInfo getUs(TunnelInfo info) {
    Hash us = _context.routerHash();
    TunnelInfo lastUs = null;
    while (info != null) {
        if (us.equals(info.getThisHop()))
            lastUs = info;
        info = info.getNextHopInfo();
    }
    return lastUs;
}
/**
 * Are we the endpoint for the tunnel?
 *
 * @return true when our hop entry exists and has no next hop
 */
private boolean isEndpoint(TunnelInfo info) {
    TunnelInfo us = getUs(info);
    if (us == null) return false;
    return (us.getNextHop() == null);
}
/**
 * Are we the gateway for the tunnel?
 *
 * @return true when our hop entry carries the signing key (only the gateway can sign)
 */
private boolean isGateway(TunnelInfo info) {
    TunnelInfo us = getUs(info);
    if (us == null) return false;
    return (us.getSigningKey() != null); // only the gateway can sign
}
/**
 * Wrap the payload into a TunnelMessage for sending as the gateway: build delivery
 * instructions (remote router/tunnel if _destRouter is set, otherwise local delivery
 * at the endpoint), encrypt the instructions with the tunnel's key and the payload
 * with a fresh session key, and attach the gateway's verification structure.
 *
 * @param info our gateway TunnelInfo; must carry the tunnel encryption key
 * @return the prepared TunnelMessage, or null if info or its encryption key is missing
 */
private TunnelMessage prepareMessage(TunnelInfo info) {
    TunnelMessage msg = new TunnelMessage(_context);
    SessionKey key = _context.keyGenerator().generateSessionKey();
    DeliveryInstructions instructions = new DeliveryInstructions();
    instructions.setDelayRequested(false);
    instructions.setEncrypted(true);
    instructions.setEncryptionKey(key);
    // if we aren't told where to send it, have it be processed locally at the endpoint
    // but if we are, have the endpoint forward it appropriately.
    // note that this algorithm does not currently support instructing the endpoint to send to a Destination
    if (_destRouter != null) {
        instructions.setRouter(_destRouter);
        if (_targetTunnelId != null) {
            _log.debug("Instructions target tunnel " + _targetTunnelId + " on router " + _destRouter.calculateHash());
            instructions.setTunnelId(_targetTunnelId);
            instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL);
        } else {
            _log.debug("Instructions target router " + _destRouter.toBase64());
            instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER);
        }
    } else {
        if (_message instanceof DataMessage) {
            _log.debug("Instructions are for local message delivery at the endpoint with a DataMessage to be sent to a Destination");
            instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
        } else {
            _log.debug("Instructions are for local delivery at the endpoint targetting the now-local router");
            instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
        }
    }
    if (info == null) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Tunnel info is null to send message " + _message);
        return null;
    } else if ( (info.getEncryptionKey() == null) || (info.getEncryptionKey().getKey() == null) ) {
        if (_log.shouldLog(Log.WARN))
            _log.warn("Tunnel encryption key is null when we're the gateway?!  info: " + info);
        return null;
    }
    // instructions go under the tunnel key, the payload under the fresh session key
    byte encryptedInstructions[] = encrypt(instructions, info.getEncryptionKey().getKey(), 512);
    byte encryptedMessage[] = encrypt(_message, key, 1024);
    TunnelVerificationStructure verification = createVerificationStructure(encryptedMessage, info);
    String bodyType = _message.getClass().getName();
    _context.messageHistory().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
    _log.debug("Tunnel message prepared: instructions = " + instructions);
    msg.setData(encryptedMessage);
    msg.setEncryptedDeliveryInstructions(encryptedInstructions);
    msg.setTunnelId(_tunnelId);
    msg.setVerificationStructure(verification);
    return msg;
}
/**
 * Build the gateway's verification structure: a SHA-256 hash of the encrypted
 * payload, signed with the tunnel's signing key so participants can detect tampering.
 *
 * @param encryptedMessage the already-encrypted payload bytes
 * @param info our gateway TunnelInfo (must carry the signing key)
 * @return the signed verification structure
 */
private TunnelVerificationStructure createVerificationStructure(byte encryptedMessage[], TunnelInfo info) {
    TunnelVerificationStructure struct = new TunnelVerificationStructure();
    struct.setMessageHash(_context.sha().calculateHash(encryptedMessage));
    struct.sign(_context, info.getSigningKey().getKey());
    return struct;
}
/**
 * Serialize a data structure and AES-encrypt it, padded to the given size. The IV is
 * derived from the first 16 bytes of the SHA-256 hash of the session key's data.
 *
 * @param struct the structure to serialize and encrypt
 * @param key session key to encrypt with (also the IV source)
 * @param paddedSize size to pad the encrypted output to
 * @return the encrypted bytes, or null if serialization fails (error is logged)
 */
private byte[] encrypt(DataStructure struct, SessionKey key, int paddedSize) {
    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream(paddedSize);
        struct.writeBytes(baos);
        byte iv[] = new byte[16];
        Hash h = _context.sha().calculateHash(key.getData());
        System.arraycopy(h.getData(), 0, iv, 0, iv.length);
        return _context.AESEngine().safeEncrypt(baos.toByteArray(), key, iv, paddedSize);
    } catch (IOException ioe) {
        _log.error("Error writing out data to encrypt", ioe);
    } catch (DataFormatException dfe) {
        _log.error("Error formatting data to encrypt", dfe);
    }
    return null;
}
/**
 * We are both the gateway and the endpoint (a local "tunnel"), so honor the delivery
 * instructions directly without encrypting/decrypting anything: either forward to the
 * remote router/tunnel (_destRouter set), or deliver locally - network messages go to
 * the InNetMessagePool, DataMessages for a Destination go to the ClientMessagePool.
 *
 * @param info our TunnelInfo (used for the target Destination on local delivery)
 */
private void honorInstructions(TunnelInfo info) {
    if (_selector != null)
        createFakeOutNetMessage();

    if (_onSend != null) {
        _log.debug("Firing onSend as we're honoring the instructions");
        _context.jobQueue().addJob(_onSend);
    }

    // since we are the gateway, we don't need to decrypt the delivery instructions or the payload
    RouterIdentity ident = _context.router().getRouterInfo().getIdentity();

    if (_destRouter != null) {
        I2NPMessage msg = null;
        if (_targetTunnelId != null) {
            // wrap the payload in an unsigned TunnelMessage targeting the remote tunnel
            _log.debug("Forward " + _message.getClass().getName() + " message off to remote tunnel " + _targetTunnelId.getTunnelId() + " on router " + _destRouter.toBase64());
            TunnelMessage tmsg = new TunnelMessage(_context);
            tmsg.setEncryptedDeliveryInstructions(null);
            tmsg.setTunnelId(_targetTunnelId);
            tmsg.setVerificationStructure(null);
            ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
            try {
                _message.writeBytes(baos);
            } catch (IOException ioe) {
                _log.error("Error writing out the message to be forwarded...??", ioe);
            } catch (DataFormatException dfe) {
                _log.error("Error writing message to be forwarded...???", dfe);
            }
            tmsg.setData(baos.toByteArray());
            msg = tmsg;
        } else {
            _log.debug("Forward " + _message.getClass().getName() + " message off to remote router " + _destRouter.toBase64());
            msg = _message;
        }
        long now = _context.clock().now();
        //if (_expiration < now) {
        _expiration = now + Router.CLOCK_FUDGE_FACTOR;
        //_log.info("Fudging the message send so it expires in the fudge factor...");
        //}

        if (_expiration - 30*1000 < now) {
            _log.error("Why are we trying to send a " + _message.getClass().getName() + " message with " + (_expiration-now) + "ms left?", getAddedBy());
        }

        String bodyType = _message.getClass().getName();
        _context.messageHistory().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());

        // don't specify a selector, since createFakeOutNetMessage already does that
        _context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, _destRouter, _onSend, _onReply, _onFailure, null, _expiration, _priority));
    } else {
        if ( (info.getDestination() == null) || !(_message instanceof DataMessage) ) {
            // its a network message targeting us...
            _log.debug("Destination is null or its not a DataMessage - pass it off to the InNetMessagePool");
            InNetMessage msg = new InNetMessage();
            msg.setFromRouter(ident);
            msg.setFromRouterHash(ident.getHash());
            msg.setMessage(_message);
            msg.setReplyBlock(null);
            _context.inNetMessagePool().add(msg);
        } else {
            _log.debug("Destination is not null and it is a DataMessage - pop it into the ClientMessagePool");
            DataMessage msg = (DataMessage)_message;
            // drop duplicates before handing the payload to the client layer
            boolean valid = _context.messageValidator().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime());
            if (!valid) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Duplicate data message received [" + msg.getUniqueId() + " expiring on " + msg.getMessageExpiration() + "]");
                _context.messageHistory().droppedOtherMessage(msg);
                _context.messageHistory().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(), "Duplicate");
                return;
            }

            Payload payload = new Payload();
            payload.setEncryptedData(msg.getData());

            MessageReceptionInfo receptionInfo = new MessageReceptionInfo();
            receptionInfo.setFromPeer(ident.getHash());
            receptionInfo.setFromTunnel(_tunnelId);

            ClientMessage clientMessage = new ClientMessage();
            clientMessage.setDestination(info.getDestination());
            clientMessage.setPayload(payload);
            clientMessage.setReceptionInfo(receptionInfo);
            _context.clientMessagePool().add(clientMessage);
            _context.messageHistory().receivePayloadMessage(msg.getUniqueId());
        }
    }
}
/**
 * Register a placeholder OutNetMessage carrying our reply selector so the message
 * registry can match replies to this locally-tunneled message (no real target - the
 * message never goes over the wire through this entry).
 */
private void createFakeOutNetMessage() {
    // now we create a fake outNetMessage to go onto the registry so we can select
    _log.debug("Registering a fake outNetMessage for the message tunneled locally since we have a selector");
    OutNetMessage outM = new OutNetMessage(_context);
    outM.setExpiration(_expiration);
    outM.setMessage(_message);
    outM.setOnFailedReplyJob(_onFailure);
    outM.setOnFailedSendJob(_onFailure);
    outM.setOnReplyJob(_onReply);
    outM.setOnSendJob(_onSend);
    outM.setPriority(_priority);
    outM.setReplySelector(_selector);
    outM.setTarget(null);
    _context.messageRegistry().registerPending(outM);
}
/** Short human-readable name of this job, used by the job queue / logging. */
public String getName() { return "Send Tunnel Message"; }

View File

@ -1,9 +1,9 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -15,17 +15,22 @@ import net.i2p.data.i2np.SourceRouteBlock;
import net.i2p.data.i2np.SourceRouteReplyMessage;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;
/**
* HandlerJobBuilder to build jobs to handle SourceRouteReplyMessages
*
*/
public class SourceRouteReplyMessageHandler implements HandlerJobBuilder {
    // context rooting this handler's jobs (replaces the old singleton lookups)
    private RouterContext _context;

    public SourceRouteReplyMessageHandler(RouterContext context) {
        _context = context;
    }

    /**
     * Build a job handling the received SourceRouteReplyMessage.
     *
     * @param replyBlock ignored for now
     */
    public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) {
        // ignore the replyBlock for now
        HandleSourceRouteReplyMessageJob job = new HandleSourceRouteReplyMessageJob(_context, (SourceRouteReplyMessage)receivedMessage, from, fromHash);
        return job;
    }
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.message;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -15,17 +15,22 @@ import net.i2p.data.i2np.SourceRouteBlock;
import net.i2p.data.i2np.TunnelMessage;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;
/**
* HandlerJobBuilder to build jobs to handle TunnelMessages
*
*/
public class TunnelMessageHandler implements HandlerJobBuilder {
    // context rooting this handler's jobs (replaces the old singleton lookups)
    private RouterContext _context;

    public TunnelMessageHandler(RouterContext context) {
        _context = context;
    }

    /**
     * Build a job handling the received TunnelMessage.
     *
     * @param replyBlock ignored for now
     */
    public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) {
        // ignore the replyBlock for now
        HandleTunnelMessageJob job = new HandleTunnelMessageJob(_context, (TunnelMessage)receivedMessage, from, fromHash);
        return job;
    }
}

View File

@ -15,20 +15,22 @@ import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.SourceRouteBlock;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.stat.StatManager;
import net.i2p.router.RouterContext;
/**
* Build a HandleDatabaseLookupMessageJob whenever a DatabaseLookupMessage arrives
*
*/
public class DatabaseLookupMessageHandler implements HandlerJobBuilder {
static {
StatManager.getInstance().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
private RouterContext _context;
public DatabaseLookupMessageHandler(RouterContext context) {
_context = context;
_context.statManager().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
}
public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) {
StatManager.getInstance().addRateData("netDb.lookupsReceived", 1, 0);
_context.statManager().addRateData("netDb.lookupsReceived", 1, 0);
// ignore the reply block for the moment
return new HandleDatabaseLookupMessageJob((DatabaseLookupMessage)receivedMessage, from, fromHash);
return new HandleDatabaseLookupMessageJob(_context, (DatabaseLookupMessage)receivedMessage, from, fromHash);
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -15,14 +15,19 @@ import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.SourceRouteBlock;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;
/**
* Build a HandleDatabaseSearchReplyMessageJob whenever a DatabaseSearchReplyMessage arrives
*
*/
public class DatabaseSearchReplyMessageHandler implements HandlerJobBuilder {
    // context rooting this handler's jobs (replaces the old singleton lookups)
    private RouterContext _context;

    public DatabaseSearchReplyMessageHandler(RouterContext context) {
        _context = context;
    }

    /**
     * Build a job handling the received DatabaseSearchReplyMessage.
     *
     * @param replyBlock ignored for now
     */
    public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) {
        // ignore the reply block for now
        return new HandleDatabaseSearchReplyMessageJob(_context, (DatabaseSearchReplyMessage)receivedMessage, from, fromHash);
    }
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -15,14 +15,19 @@ import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.SourceRouteBlock;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.router.RouterContext;
/**
* Create a HandleDatabaseStoreMessageJob whenever a DatabaseStoreMessage arrives
*
*/
public class DatabaseStoreMessageHandler implements HandlerJobBuilder {
    // context rooting this handler's jobs (replaces the old singleton lookups)
    private RouterContext _context;

    public DatabaseStoreMessageHandler(RouterContext context) {
        _context = context;
    }

    /**
     * Build a job handling the received DatabaseStoreMessage.
     *
     * @param replyBlock ignored for the moment
     */
    public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) {
        // ignore the reply block for the moment
        return new HandleDatabaseStoreMessageJob(_context, (DatabaseStoreMessage)receivedMessage, from, fromHash);
    }
}

View File

@ -37,7 +37,7 @@ import net.i2p.router.message.SendMessageDirectJob;
import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.stat.StatManager;
import net.i2p.router.RouterContext;
/**
* Handle a lookup for a key received from a remote peer. Needs to be implemented
@ -45,20 +45,19 @@ import net.i2p.stat.StatManager;
*
*/
public class HandleDatabaseLookupMessageJob extends JobImpl {
private final static Log _log = new Log(HandleDatabaseLookupMessageJob.class);
private Log _log;
private DatabaseLookupMessage _message;
private RouterIdentity _from;
private Hash _fromHash;
private final static int MAX_ROUTERS_RETURNED = 3;
private final static int REPLY_TIMEOUT = 60*1000;
private final static int MESSAGE_PRIORITY = 300;
static {
StatManager.getInstance().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
}
public HandleDatabaseLookupMessageJob(DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
public HandleDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
super(ctx);
_log = _context.logManager().getLog(HandleDatabaseLookupMessageJob.class);
_context.statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_message = receivedMessage;
_from = from;
_fromHash = fromHash;
@ -77,14 +76,14 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
}
// might as well grab what they sent us
NetworkDatabaseFacade.getInstance().store(fromKey, _message.getFrom());
_context.netDb().store(fromKey, _message.getFrom());
// whatdotheywant?
handleRequest(fromKey);
}
private void handleRequest(Hash fromKey) {
LeaseSet ls = NetworkDatabaseFacade.getInstance().lookupLeaseSetLocally(_message.getSearchKey());
LeaseSet ls = _context.netDb().lookupLeaseSetLocally(_message.getSearchKey());
if (ls != null) {
// send that lease set to the _message.getFromHash peer
if (_log.shouldLog(Log.DEBUG))
@ -92,7 +91,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
+ " locally as a lease set. sending to " + fromKey.toBase64());
sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
} else {
RouterInfo info = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(_message.getSearchKey());
RouterInfo info = _context.netDb().lookupRouterInfoLocally(_message.getSearchKey());
if (info != null) {
// send that routerInfo to the _message.getFromHash peer
if (_log.shouldLog(Log.DEBUG))
@ -101,8 +100,9 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
sendData(_message.getSearchKey(), info, fromKey, _message.getReplyTunnel());
} else {
// not found locally - return closest peer routerInfo structs
Set routerInfoSet = NetworkDatabaseFacade.getInstance().findNearestRouters(_message.getSearchKey(),
MAX_ROUTERS_RETURNED, _message.getDontIncludePeers());
Set routerInfoSet = _context.netDb().findNearestRouters(_message.getSearchKey(),
MAX_ROUTERS_RETURNED,
_message.getDontIncludePeers());
if (_log.shouldLog(Log.DEBUG))
_log.debug("We do not have key " + _message.getSearchKey().toBase64() +
" locally. sending back " + routerInfoSet.size() + " peers to " + fromKey.toBase64());
@ -115,7 +115,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending data matching key key " + key.toBase64() + " to peer " + toPeer.toBase64()
+ " tunnel " + replyTunnel);
DatabaseStoreMessage msg = new DatabaseStoreMessage();
DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
msg.setKey(key);
if (data instanceof LeaseSet) {
msg.setLeaseSet((LeaseSet)data);
@ -124,8 +124,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
msg.setRouterInfo((RouterInfo)data);
msg.setValueType(DatabaseStoreMessage.KEY_TYPE_ROUTERINFO);
}
StatManager.getInstance().addRateData("netDb.lookupsMatched", 1, 0);
StatManager.getInstance().addRateData("netDb.lookupsHandled", 1, 0);
_context.statManager().addRateData("netDb.lookupsMatched", 1, 0);
_context.statManager().addRateData("netDb.lookupsHandled", 1, 0);
sendMessage(msg, toPeer, replyTunnel);
}
@ -133,15 +133,15 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending closest routers to key " + key.toBase64() + ": # peers = "
+ routerInfoSet.size() + " tunnel " + replyTunnel);
DatabaseSearchReplyMessage msg = new DatabaseSearchReplyMessage();
msg.setFromHash(Router.getInstance().getRouterInfo().getIdentity().getHash());
DatabaseSearchReplyMessage msg = new DatabaseSearchReplyMessage(_context);
msg.setFromHash(_context.router().getRouterInfo().getIdentity().getHash());
msg.setSearchKey(key);
if (routerInfoSet.size() <= 0) {
// always include something, so lets toss ourselves in there
routerInfoSet.add(Router.getInstance().getRouterInfo());
routerInfoSet.add(_context.router().getRouterInfo());
}
msg.addReplies(routerInfoSet);
StatManager.getInstance().addRateData("netDb.lookupsHandled", 1, 0);
_context.statManager().addRateData("netDb.lookupsHandled", 1, 0);
sendMessage(msg, toPeer, replyTunnel); // should this go via garlic messages instead?
}
@ -152,21 +152,21 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending reply directly to " + toPeer);
send = new SendMessageDirectJob(message, toPeer, REPLY_TIMEOUT+Clock.getInstance().now(), MESSAGE_PRIORITY);
send = new SendMessageDirectJob(_context, message, toPeer, REPLY_TIMEOUT+_context.clock().now(), MESSAGE_PRIORITY);
}
NetworkDatabaseFacade.getInstance().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT);
_context.netDb().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT);
}
private void sendThroughTunnel(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) {
TunnelInfo info = TunnelManagerFacade.getInstance().getTunnelInfo(replyTunnel);
TunnelInfo info = _context.tunnelManager().getTunnelInfo(replyTunnel);
// the sendTunnelMessageJob can't handle injecting into the tunnel anywhere but the beginning
// (and if we are the beginning, we have the signing key)
if ( (info == null) || (info.getSigningKey() != null)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending reply through " + replyTunnel + " on " + toPeer);
JobQueue.getInstance().addJob(new SendTunnelMessageJob(message, replyTunnel, toPeer, null, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
_context.jobQueue().addJob(new SendTunnelMessageJob(_context, message, replyTunnel, toPeer, null, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
} else {
// its a tunnel we're participating in, but we're NOT the gateway, so
sendToGateway(message, toPeer, replyTunnel, info);
@ -183,19 +183,19 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
return;
}
long expiration = REPLY_TIMEOUT + Clock.getInstance().now();
long expiration = REPLY_TIMEOUT + _context.clock().now();
TunnelMessage msg = new TunnelMessage();
TunnelMessage msg = new TunnelMessage(_context);
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
message.writeBytes(baos);
msg.setData(baos.toByteArray());
msg.setTunnelId(replyTunnel);
msg.setMessageExpiration(new Date(expiration));
JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, toPeer, null, null, null, null, expiration, MESSAGE_PRIORITY));
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, toPeer, null, null, null, null, expiration, MESSAGE_PRIORITY));
String bodyType = message.getClass().getName();
MessageHistory.getInstance().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
_context.messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the tunnel message to send to the tunnel", ioe);
@ -208,8 +208,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
public String getName() { return "Handle Database Lookup Message"; }
public void dropped() {
MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Dropped due to overload");
_context.messageHistory().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Dropped due to overload");
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -16,55 +16,60 @@ import net.i2p.router.JobImpl;
import net.i2p.router.JobQueue;
import net.i2p.router.NetworkDatabaseFacade;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Receive DatabaseSearchReplyMessage data and store it in the local net db
*
*/
public class HandleDatabaseSearchReplyMessageJob extends JobImpl {
private final static Log _log = new Log(HandleDatabaseSearchReplyMessageJob.class);
private Log _log;
private DatabaseSearchReplyMessage _message;
private RouterIdentity _from;
private Hash _fromHash;
public HandleDatabaseSearchReplyMessageJob(DatabaseSearchReplyMessage receivedMessage, RouterIdentity from, Hash fromHash) {
_message = receivedMessage;
_from = from;
_fromHash = fromHash;
public HandleDatabaseSearchReplyMessageJob(RouterContext context, DatabaseSearchReplyMessage receivedMessage, RouterIdentity from, Hash fromHash) {
super(context);
_log = context.logManager().getLog(HandleDatabaseSearchReplyMessageJob.class);
_message = receivedMessage;
_from = from;
_fromHash = fromHash;
}
public void runJob() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Handling database search reply message for key " + _message.getSearchKey().toBase64() + " with " + _message.getNumReplies() + " replies");
if (_message.getNumReplies() > 0)
JobQueue.getInstance().addJob(new HandlePeerJob(0));
if (_log.shouldLog(Log.DEBUG))
_log.debug("Handling database search reply message for key " + _message.getSearchKey().toBase64() + " with " + _message.getNumReplies() + " replies");
if (_message.getNumReplies() > 0)
_context.jobQueue().addJob(new HandlePeerJob(0));
}
/**
* Partial job - take each reply entry, store it, then requeue again until all
* Partial job - take each reply entry, store it, then requeue again until all
* of the entries are stored. This prevents a single reply from swamping the jobqueue
*
*/
private final class HandlePeerJob extends JobImpl {
private int _curReply;
public HandlePeerJob(int reply) {
_curReply = reply;
}
public void runJob() {
boolean remaining = handle();
if (remaining)
requeue(0);
}
private boolean handle() {
RouterInfo info = _message.getReply(_curReply);
if (_log.shouldLog(Log.INFO))
_log.info("On search for " + _message.getSearchKey().toBase64() + ", received " + info.getIdentity().getHash().toBase64());
NetworkDatabaseFacade.getInstance().store(info.getIdentity().getHash(), info);
_curReply++;
return _message.getNumReplies() > _curReply;
}
public String getName() { return "Handle search reply value"; }
private int _curReply;
public HandlePeerJob(int reply) {
super(HandleDatabaseSearchReplyMessageJob.this._context);
_curReply = reply;
}
public void runJob() {
boolean remaining = handle();
if (remaining)
requeue(0);
}
private boolean handle() {
RouterInfo info = _message.getReply(_curReply);
if (_log.shouldLog(Log.INFO))
_log.info("On search for " + _message.getSearchKey().toBase64() + ", received " + info.getIdentity().getHash().toBase64());
HandlePeerJob.this._context.netDb().store(info.getIdentity().getHash(), info);
_curReply++;
return _message.getNumReplies() > _curReply;
}
public String getName() { return "Handle search reply value"; }
}
public String getName() { return "Handle Database Search Reply Message"; }

View File

@ -19,22 +19,22 @@ import net.i2p.router.NetworkDatabaseFacade;
import net.i2p.router.ProfileManager;
import net.i2p.util.Log;
import net.i2p.stat.StatManager;
import net.i2p.router.RouterContext;
/**
* Receive DatabaseStoreMessage data and store it in the local net db
*
*/
public class HandleDatabaseStoreMessageJob extends JobImpl {
private final static Log _log = new Log(HandleDatabaseStoreMessageJob.class);
private Log _log;
private DatabaseStoreMessage _message;
private RouterIdentity _from;
private Hash _fromHash;
static {
StatManager.getInstance().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
}
public HandleDatabaseStoreMessageJob(DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash) {
public HandleDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash) {
super(ctx);
_log = ctx.logManager().getLog(HandleDatabaseStoreMessageJob.class);
ctx.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_message = receivedMessage;
_from = from;
_fromHash = fromHash;
@ -46,15 +46,15 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
boolean wasNew = false;
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
Object match = NetworkDatabaseFacade.getInstance().store(_message.getKey(), _message.getLeaseSet());
Object match = _context.netDb().store(_message.getKey(), _message.getLeaseSet());
wasNew = (null == match);
} else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of router " + _message.getKey() + " with publishDate of "
+ new Date(_message.getRouterInfo().getPublished()));
Object match = NetworkDatabaseFacade.getInstance().store(_message.getKey(), _message.getRouterInfo());
Object match = _context.netDb().store(_message.getKey(), _message.getRouterInfo());
wasNew = (null == match);
ProfileManager.getInstance().heardAbout(_message.getKey());
_context.profileManager().heardAbout(_message.getKey());
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType()
@ -63,13 +63,13 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
if (_from != null)
_fromHash = _from.getHash();
if (_fromHash != null)
ProfileManager.getInstance().dbStoreReceived(_fromHash, wasNew);
StatManager.getInstance().addRateData("netDb.storeHandled", 1, 0);
_context.profileManager().dbStoreReceived(_fromHash, wasNew);
_context.statManager().addRateData("netDb.storeHandled", 1, 0);
}
public String getName() { return "Handle Database Store Message"; }
public void dropped() {
MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
_context.messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
}
}

View File

@ -22,33 +22,42 @@ import net.i2p.router.StatisticsManager;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.router.RouterContext;
/**
* Publish the local router's RouterInfo every 5 to 10 minutes
*
*/
public class PublishLocalRouterInfoJob extends JobImpl {
private final static Log _log = new Log(PublishLocalRouterInfoJob.class);
private Log _log;
final static long PUBLISH_DELAY = 5*60*1000; // every 5 to 10 minutes (since we randomize)
public PublishLocalRouterInfoJob(RouterContext ctx) {
super(ctx);
_log = ctx.logManager().getLog(PublishLocalRouterInfoJob.class);
}
public String getName() { return "Publish Local Router Info"; }
public void runJob() {
RouterInfo ri = new RouterInfo(Router.getInstance().getRouterInfo());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Old routerInfo contains " + ri.getAddresses().size() + " addresses and " + ri.getOptions().size() + " options");
Properties stats = StatisticsManager.getInstance().publishStatistics();
try {
ri.setPublished(Clock.getInstance().now());
ri.setOptions(stats);
ri.setAddresses(CommSystemFacade.getInstance().createAddresses());
ri.sign(KeyManager.getInstance().getSigningPrivateKey());
Router.getInstance().setRouterInfo(ri);
if (_log.shouldLog(Log.INFO))
_log.info("Newly updated routerInfo is published with " + stats.size() + "/" + ri.getOptions().size() + " options on " + new Date(ri.getPublished()));
NetworkDatabaseFacade.getInstance().publish(ri);
} catch (DataFormatException dfe) {
_log.error("Error signing the updated local router info!", dfe);
}
requeue(PUBLISH_DELAY + RandomSource.getInstance().nextInt((int)PUBLISH_DELAY));
RouterInfo ri = new RouterInfo(_context.router().getRouterInfo());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Old routerInfo contains " + ri.getAddresses().size()
+ " addresses and " + ri.getOptions().size() + " options");
Properties stats = _context.statPublisher().publishStatistics();
try {
ri.setPublished(_context.clock().now());
ri.setOptions(stats);
ri.setAddresses(_context.commSystem().createAddresses());
ri.sign(_context.keyManager().getSigningPrivateKey());
_context.router().setRouterInfo(ri);
if (_log.shouldLog(Log.INFO))
_log.info("Newly updated routerInfo is published with " + stats.size()
+ "/" + ri.getOptions().size() + " options on "
+ new Date(ri.getPublished()));
_context.netDb().publish(ri);
} catch (DataFormatException dfe) {
_log.error("Error signing the updated local router info!", dfe);
}
requeue(PUBLISH_DELAY + _context.random().nextInt((int)PUBLISH_DELAY));
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -20,68 +20,70 @@ import net.i2p.router.JobQueue;
import net.i2p.router.Router;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
class DataPublisherJob extends JobImpl {
private final static Log _log = new Log(DataPublisherJob.class);
private Log _log;
private KademliaNetworkDatabaseFacade _facade;
private final static long RERUN_DELAY_MS = 30*1000;
private final static int MAX_SEND_PER_RUN = 5; // publish no more than 5 at a time
private final static long STORE_TIMEOUT = 60*1000; // give 'er a minute to send the data
public DataPublisherJob(KademliaNetworkDatabaseFacade facade) {
super();
_facade = facade;
getTiming().setStartAfter(Clock.getInstance().now()+RERUN_DELAY_MS); // not immediate...
public DataPublisherJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) {
super(ctx);
_log = ctx.logManager().getLog(DataPublisherJob.class);
_facade = facade;
getTiming().setStartAfter(ctx.clock().now()+RERUN_DELAY_MS); // not immediate...
}
public String getName() { return "Data Publisher Job"; }
public void runJob() {
Set toSend = selectKeysToSend();
_log.info("Keys being published in this timeslice: " + toSend);
for (Iterator iter = toSend.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
DataStructure data = _facade.getDataStore().get(key);
if (data == null) {
_log.warn("Trying to send a key we dont have? " + key);
continue;
}
if (data instanceof LeaseSet) {
LeaseSet ls = (LeaseSet)data;
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
_log.warn("Not publishing a lease that isn't current - " + key, new Exception("Publish expired lease?"));
}
}
StoreJob store = new StoreJob(_facade, key, data, null, null, STORE_TIMEOUT);
JobQueue.getInstance().addJob(store);
}
requeue(RERUN_DELAY_MS);
public void runJob() {
Set toSend = selectKeysToSend();
_log.info("Keys being published in this timeslice: " + toSend);
for (Iterator iter = toSend.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
DataStructure data = _facade.getDataStore().get(key);
if (data == null) {
_log.warn("Trying to send a key we dont have? " + key);
continue;
}
if (data instanceof LeaseSet) {
LeaseSet ls = (LeaseSet)data;
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
_log.warn("Not publishing a lease that isn't current - " + key, new Exception("Publish expired lease?"));
}
}
StoreJob store = new StoreJob(_context, _facade, key, data, null, null, STORE_TIMEOUT);
_context.jobQueue().addJob(store);
}
requeue(RERUN_DELAY_MS);
}
private Set selectKeysToSend() {
Set explicit = _facade.getExplicitSendKeys();
Set toSend = new HashSet(MAX_SEND_PER_RUN);
if (explicit.size() < MAX_SEND_PER_RUN) {
toSend.addAll(explicit);
_facade.removeFromExplicitSend(explicit);
Set passive = _facade.getPassivelySendKeys();
Set psend = new HashSet(passive.size());
for (Iterator iter = passive.iterator(); iter.hasNext(); ) {
if (toSend.size() >= MAX_SEND_PER_RUN) break;
Hash key = (Hash)iter.next();
toSend.add(key);
psend.add(key);
}
_facade.removeFromPassiveSend(psend);
} else {
for (Iterator iter = explicit.iterator(); iter.hasNext(); ) {
if (toSend.size() >= MAX_SEND_PER_RUN) break;
Hash key = (Hash)iter.next();
toSend.add(key);
}
_facade.removeFromExplicitSend(toSend);
}
return toSend;
Set explicit = _facade.getExplicitSendKeys();
Set toSend = new HashSet(MAX_SEND_PER_RUN);
if (explicit.size() < MAX_SEND_PER_RUN) {
toSend.addAll(explicit);
_facade.removeFromExplicitSend(explicit);
Set passive = _facade.getPassivelySendKeys();
Set psend = new HashSet(passive.size());
for (Iterator iter = passive.iterator(); iter.hasNext(); ) {
if (toSend.size() >= MAX_SEND_PER_RUN) break;
Hash key = (Hash)iter.next();
toSend.add(key);
psend.add(key);
}
_facade.removeFromPassiveSend(psend);
} else {
for (Iterator iter = explicit.iterator(); iter.hasNext(); ) {
if (toSend.size() >= MAX_SEND_PER_RUN) break;
Hash key = (Hash)iter.next();
toSend.add(key);
}
_facade.removeFromExplicitSend(toSend);
}
return toSend;
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -21,21 +21,22 @@ import net.i2p.router.Router;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.router.RouterContext;
class DataRepublishingSelectorJob extends JobImpl {
private final static Log _log = new Log(DataRepublishingSelectorJob.class);
private Log _log;
private KademliaNetworkDatabaseFacade _facade;
private final static long RERUN_DELAY_MS = 1*60*1000;
public final static int MAX_PASSIVE_POOL_SIZE = 30; // no need to have the pool be too big
/**
* For every bucket away from us, resend period increases by 5 minutes - so we resend
* our own key every 5 minutes, and keys very far from us every 2.5 hours, increasing
* linearly
*/
public final static long RESEND_BUCKET_FACTOR = 5*60*1000;
/**
* % chance any peer not specializing in the lease's key will broadcast it on each pass
* of this job /after/ waiting 5 minutes (one RESENT_BUCKET_FACTOR). In other words,
@ -44,66 +45,67 @@ class DataRepublishingSelectorJob extends JobImpl {
*
*/
private final static int LEASE_REBROADCAST_PROBABILITY = 5;
/**
/**
* LEASE_REBROADCAST_PROBABILITY out of LEASE_REBROADCAST_PROBABILITY_SCALE chance.
*/
private final static int LEASE_REBROADCAST_PROBABILITY_SCALE = 1000;
public DataRepublishingSelectorJob(KademliaNetworkDatabaseFacade facade) {
super();
_facade = facade;
getTiming().setStartAfter(Clock.getInstance().now()+RERUN_DELAY_MS); // not immediate...
public DataRepublishingSelectorJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) {
super(ctx);
_log = ctx.logManager().getLog(DataRepublishingSelectorJob.class);
_facade = facade;
getTiming().setStartAfter(ctx.clock().now()+RERUN_DELAY_MS); // not immediate...
}
public String getName() { return "Data Publisher Job"; }
public void runJob() {
Set toSend = selectKeysToSend();
_log.info("Keys being queued up for publishing: " + toSend);
_facade.queueForPublishing(toSend);
requeue(RERUN_DELAY_MS);
public void runJob() {
Set toSend = selectKeysToSend();
_log.info("Keys being queued up for publishing: " + toSend);
_facade.queueForPublishing(toSend);
requeue(RERUN_DELAY_MS);
}
/**
* Run through the entire data store, ranking how much we want to send each
* Run through the entire data store, ranking how much we want to send each
* data point, and returning the ones we most want to send so that they can
* be placed in the passive send pool (without making the passive pool greater
* than the limit)
*
*/
private Set selectKeysToSend() {
Set alreadyQueued = new HashSet(128);
alreadyQueued.addAll(_facade.getPassivelySendKeys());
int toAdd = MAX_PASSIVE_POOL_SIZE - alreadyQueued.size();
_log.debug("Keys we need to queue up to fill the passive send pool: " + toAdd);
if (toAdd <= 0) return new HashSet();
alreadyQueued.addAll(_facade.getExplicitSendKeys());
Set keys = _facade.getDataStore().getKeys();
keys.removeAll(alreadyQueued);
_log.debug("Total number of keys in the datastore: " + keys.size());
TreeMap toSend = new TreeMap();
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Long lastPublished = _facade.getLastSent(key);
long publishRank = rankPublishNeed(key, lastPublished);
_log.debug("Publish rank for " + key + ": " + publishRank);
if (publishRank > 0) {
while (toSend.containsKey(new Long(publishRank)))
publishRank++;
toSend.put(new Long(publishRank), key);
}
}
Set rv = new HashSet(toAdd);
for (Iterator iter = toSend.values().iterator(); iter.hasNext(); ) {
if (rv.size() > toAdd) break;
Hash key = (Hash)iter.next();
rv.add(key);
}
return rv;
Set alreadyQueued = new HashSet(128);
alreadyQueued.addAll(_facade.getPassivelySendKeys());
int toAdd = MAX_PASSIVE_POOL_SIZE - alreadyQueued.size();
_log.debug("Keys we need to queue up to fill the passive send pool: " + toAdd);
if (toAdd <= 0) return new HashSet();
alreadyQueued.addAll(_facade.getExplicitSendKeys());
Set keys = _facade.getDataStore().getKeys();
keys.removeAll(alreadyQueued);
_log.debug("Total number of keys in the datastore: " + keys.size());
TreeMap toSend = new TreeMap();
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Long lastPublished = _facade.getLastSent(key);
long publishRank = rankPublishNeed(key, lastPublished);
_log.debug("Publish rank for " + key + ": " + publishRank);
if (publishRank > 0) {
while (toSend.containsKey(new Long(publishRank)))
publishRank++;
toSend.put(new Long(publishRank), key);
}
}
Set rv = new HashSet(toAdd);
for (Iterator iter = toSend.values().iterator(); iter.hasNext(); ) {
if (rv.size() > toAdd) break;
Hash key = (Hash)iter.next();
rv.add(key);
}
return rv;
}
/**
@ -112,49 +114,49 @@ class DataRepublishingSelectorJob extends JobImpl {
*
*/
private long rankPublishNeed(Hash key, Long lastPublished) {
int bucket = _facade.getKBuckets().pickBucket(key);
long sendPeriod = (bucket+1) * RESEND_BUCKET_FACTOR;
long now = Clock.getInstance().now();
if (lastPublished.longValue() < now-sendPeriod) {
RouterInfo ri = _facade.lookupRouterInfoLocally(key);
if (ri != null) {
if (ri.isCurrent(2 * ExpireRoutersJob.EXPIRE_DELAY)) {
// last time it was sent was before the last send period
return KBucketSet.NUM_BUCKETS - bucket;
} else {
_log.info("Not republishing router " + key + " since it is really old [" + (now-ri.getPublished()) + "ms]");
return -2;
}
} else {
LeaseSet ls = _facade.lookupLeaseSetLocally(key);
if (ls != null) {
if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
// last time it was sent was before the last send period
return KBucketSet.NUM_BUCKETS - bucket;
} else {
_log.info("Not republishing leaseSet " + key + " since it is really old [" + (now-ls.getEarliestLeaseDate()) + "ms]");
return -3;
}
} else {
_log.info("Key " + key + " is not a leaseSet or routerInfo, definitely not publishing it");
return -5;
}
}
} else {
// its been published since the last period we want to publish it
if (now - RESEND_BUCKET_FACTOR > lastPublished.longValue()) {
if (_facade.lookupRouterInfoLocally(key) != null) {
// randomize the chance of rebroadcast for leases if we haven't
// sent it within 5 minutes
int val = RandomSource.getInstance().nextInt(LEASE_REBROADCAST_PROBABILITY_SCALE);
if (val <= LEASE_REBROADCAST_PROBABILITY) {
_log.info("Randomized rebroadcast of leases tells us to send " + key + ": " + val);
return 1;
}
}
}
return -1;
}
int bucket = _facade.getKBuckets().pickBucket(key);
long sendPeriod = (bucket+1) * RESEND_BUCKET_FACTOR;
long now = _context.clock().now();
if (lastPublished.longValue() < now-sendPeriod) {
RouterInfo ri = _facade.lookupRouterInfoLocally(key);
if (ri != null) {
if (ri.isCurrent(2 * ExpireRoutersJob.EXPIRE_DELAY)) {
// last time it was sent was before the last send period
return KBucketSet.NUM_BUCKETS - bucket;
} else {
_log.info("Not republishing router " + key + " since it is really old [" + (now-ri.getPublished()) + "ms]");
return -2;
}
} else {
LeaseSet ls = _facade.lookupLeaseSetLocally(key);
if (ls != null) {
if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
// last time it was sent was before the last send period
return KBucketSet.NUM_BUCKETS - bucket;
} else {
_log.info("Not republishing leaseSet " + key + " since it is really old [" + (now-ls.getEarliestLeaseDate()) + "ms]");
return -3;
}
} else {
_log.info("Key " + key + " is not a leaseSet or routerInfo, definitely not publishing it");
return -5;
}
}
} else {
// its been published since the last period we want to publish it
if (now - RESEND_BUCKET_FACTOR > lastPublished.longValue()) {
if (_facade.lookupRouterInfoLocally(key) != null) {
// randomize the chance of rebroadcast for leases if we haven't
// sent it within 5 minutes
int val = _context.random().nextInt(LEASE_REBROADCAST_PROBABILITY_SCALE);
if (val <= LEASE_REBROADCAST_PROBABILITY) {
_log.info("Randomized rebroadcast of leases tells us to send " + key + ": " + val);
return 1;
}
}
}
return -1;
}
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -17,57 +17,59 @@ import net.i2p.data.LeaseSet;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Periodically search through all leases to find expired ones, failing those
* keys and firing up a new search for each (in case we want it later, might as
* Periodically search through all leases to find expired ones, failing those
* keys and firing up a new search for each (in case we want it later, might as
* well preemptively fetch it)
*
*/
class ExpireLeasesJob extends JobImpl {
private final static Log _log = new Log(ExpireLeasesJob.class);
private Log _log;
private KademliaNetworkDatabaseFacade _facade;
private final static long RERUN_DELAY_MS = 1*60*1000;
public ExpireLeasesJob(KademliaNetworkDatabaseFacade facade) {
super();
_facade = facade;
public ExpireLeasesJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) {
super(ctx);
_log = ctx.logManager().getLog(ExpireLeasesJob.class);
_facade = facade;
}
public String getName() { return "Expire Lease Sets Job"; }
public void runJob() {
Set toExpire = selectKeysToExpire();
_log.info("Leases to expire: " + toExpire);
for (Iterator iter = toExpire.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
_facade.fail(key);
_log.info("Lease " + key + " is expiring, so lets look for it again", new Exception("Expire and search"));
_facade.lookupLeaseSet(key, null, null, RERUN_DELAY_MS);
}
//_facade.queueForExploration(toExpire); // don't do explicit searches, just explore passively
requeue(RERUN_DELAY_MS);
public void runJob() {
Set toExpire = selectKeysToExpire();
_log.info("Leases to expire: " + toExpire);
for (Iterator iter = toExpire.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
_facade.fail(key);
_log.info("Lease " + key + " is expiring, so lets look for it again", new Exception("Expire and search"));
_facade.lookupLeaseSet(key, null, null, RERUN_DELAY_MS);
}
//_facade.queueForExploration(toExpire); // don't do explicit searches, just explore passively
requeue(RERUN_DELAY_MS);
}
/**
* Run through the entire data store, finding all expired leaseSets (ones that
* don't have any leases that haven't yet passed, even with the CLOCK_FUDGE_FACTOR)
*
*/
private Set selectKeysToExpire() {
Set keys = _facade.getDataStore().getKeys();
Set toExpire = new HashSet(128);
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Object obj = _facade.getDataStore().get(key);
if (obj instanceof LeaseSet) {
LeaseSet ls = (LeaseSet)obj;
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR))
toExpire.add(key);
else
_log.debug("Lease " + ls.getDestination().calculateHash() + " is current, no need to expire");
}
}
return toExpire;
Set keys = _facade.getDataStore().getKeys();
Set toExpire = new HashSet(128);
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Object obj = _facade.getDataStore().get(key);
if (obj instanceof LeaseSet) {
LeaseSet ls = (LeaseSet)obj;
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR))
toExpire.add(key);
else
_log.debug("Lease " + ls.getDestination().calculateHash() + " is current, no need to expire");
}
}
return toExpire;
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -19,46 +19,48 @@ import net.i2p.router.JobImpl;
import net.i2p.router.TunnelManagerFacade;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Go through the routing table pick routers that are performing poorly or
* is out of date, but don't expire routers we're actively tunneling through.
* Go through the routing table pick routers that are performing poorly or
* is out of date, but don't expire routers we're actively tunneling through.
* If a peer is performing worse than some threshold (via profile.rankLiveliness)
* drop it and don't ask any questions. If a peer isn't ranked really poorly, but
* we just haven't heard from it in a while, drop it and add it to the set of
* we just haven't heard from it in a while, drop it and add it to the set of
* keys we want the netDb to explore.
*
*/
class ExpireRoutersJob extends JobImpl {
private final static Log _log = new Log(ExpireRoutersJob.class);
private Log _log;
private KademliaNetworkDatabaseFacade _facade;
private final static long RERUN_DELAY_MS = 30*1000;
/**
* If a routerInfo structure isn't updated within an hour, drop it
/**
* If a routerInfo structure isn't updated within an hour, drop it
* and search for a later version. This value should be large enough
* to deal with the Router.CLOCK_FUDGE_FACTOR.
*/
public final static long EXPIRE_DELAY = 60*60*1000;
public ExpireRoutersJob(KademliaNetworkDatabaseFacade facade) {
super();
_facade = facade;
public ExpireRoutersJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade) {
super(ctx);
_log = ctx.logManager().getLog(ExpireRoutersJob.class);
_facade = facade;
}
public String getName() { return "Expire Routers Job"; }
public void runJob() {
Set toExpire = selectKeysToExpire();
_log.info("Routers to expire (drop and try to refetch): " + toExpire);
for (Iterator iter = toExpire.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
_facade.fail(key);
}
_facade.queueForExploration(toExpire);
requeue(RERUN_DELAY_MS);
public void runJob() {
Set toExpire = selectKeysToExpire();
_log.info("Routers to expire (drop and try to refetch): " + toExpire);
for (Iterator iter = toExpire.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
_facade.fail(key);
}
_facade.queueForExploration(toExpire);
requeue(RERUN_DELAY_MS);
}
/**
* Run through all of the known peers and pick ones that have really old
@ -67,40 +69,40 @@ class ExpireRoutersJob extends JobImpl {
*
*/
private Set selectKeysToExpire() {
Set possible = getNotInUse();
Set expiring = new HashSet(16);
long earliestPublishDate = Clock.getInstance().now() - EXPIRE_DELAY;
for (Iterator iter = possible.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
RouterInfo ri = _facade.lookupRouterInfoLocally(key);
if (ri != null) {
if (!ri.isCurrent(EXPIRE_DELAY)) {
if (_log.shouldLog(Log.INFO))
_log.info("Expiring RouterInfo for " + key.toBase64() + " [published on " + new Date(ri.getPublished()) + "]");
expiring.add(key);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Not expiring routerInfo for " + key.toBase64() + " [published on " + new Date(ri.getPublished()) + "]");
}
}
}
return expiring;
Set possible = getNotInUse();
Set expiring = new HashSet(16);
long earliestPublishDate = _context.clock().now() - EXPIRE_DELAY;
for (Iterator iter = possible.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
RouterInfo ri = _facade.lookupRouterInfoLocally(key);
if (ri != null) {
if (!ri.isCurrent(EXPIRE_DELAY)) {
if (_log.shouldLog(Log.INFO))
_log.info("Expiring RouterInfo for " + key.toBase64() + " [published on " + new Date(ri.getPublished()) + "]");
expiring.add(key);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Not expiring routerInfo for " + key.toBase64() + " [published on " + new Date(ri.getPublished()) + "]");
}
}
}
return expiring;
}
/** all peers not in use by tunnels */
private Set getNotInUse() {
Set possible = new HashSet(16);
for (Iterator iter = _facade.getAllRouters().iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
if (!TunnelManagerFacade.getInstance().isInUse(peer)) {
possible.add(peer);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer is in use: " + peer.toBase64());
}
}
return possible;
Set possible = new HashSet(16);
for (Iterator iter = _facade.getAllRouters().iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
if (!_context.tunnelManager().isInUse(peer)) {
possible.add(peer);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer is in use: " + peer.toBase64());
}
}
return possible;
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -19,37 +19,41 @@ import net.i2p.data.TunnelId;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.router.Router;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Search for a particular key iteratively until we either find a value, we run
* Search for a particular key iteratively until we either find a value, we run
* out of peers, or the bucket the key belongs in has sufficient values in it.
* Well, we're skipping the 'bucket gets filled up' test for now, since it'll never
* get used (at least for a while).
*
*/
class ExploreJob extends SearchJob {
private final Log _log = new Log(ExploreJob.class);
private Log _log;
private PeerSelector _peerSelector;
/** how long each exploration should run for (currently a trivial 20 seconds) */
private final static long MAX_EXPLORE_TIME = 30*1000;
/** how many of the peers closest to the key being explored do we want to explicitly say "dont send me this"? */
private final static int NUM_CLOSEST_TO_IGNORE = 3;
/**
* Create a new search for the routingKey specified
*
*
*/
public ExploreJob(KademliaNetworkDatabaseFacade facade, Hash key) {
// note that we're treating the last param (isLease) as *false* since we're just exploring.
// if this collides with an actual leaseSet's key, neat, but that wouldn't imply we're actually
// attempting to send that lease a message!
super(facade, key, null, null, MAX_EXPLORE_TIME, false, false);
public ExploreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key) {
// note that we're treating the last param (isLease) as *false* since we're just exploring.
// if this collides with an actual leaseSet's key, neat, but that wouldn't imply we're actually
// attempting to send that lease a message!
super(context, facade, key, null, null, MAX_EXPLORE_TIME, false, false);
_log = context.logManager().getLog(ExploreJob.class);
_peerSelector = new PeerSelector(context);
}
/**
* Build the database search message, but unlike the normal searches, we're more explicit in
* what we /dont/ want. We don't just ask them to ignore the peers we've already searched
* what we /dont/ want. We don't just ask them to ignore the peers we've already searched
* on, but to ignore a number of the peers we already know about (in the target key's bucket) as well.
*
* Perhaps we may want to ignore other keys too, such as the ones in nearby
@ -59,29 +63,29 @@ class ExploreJob extends SearchJob {
*
* @param replyTunnelId tunnel to receive replies through
* @param replyGateway gateway for the reply tunnel
* @param expiration when the search should stop
* @param expiration when the search should stop
*/
protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, RouterInfo replyGateway, long expiration) {
DatabaseLookupMessage msg = new DatabaseLookupMessage();
msg.setSearchKey(getState().getTarget());
msg.setFrom(replyGateway);
msg.setDontIncludePeers(getState().getAttempted());
msg.setMessageExpiration(new Date(expiration));
msg.setReplyTunnel(replyTunnelId);
Set attempted = getState().getAttempted();
List peers = PeerSelector.getInstance().selectNearestExplicit(getState().getTarget(), NUM_CLOSEST_TO_IGNORE, attempted, getFacade().getKBuckets());
Set toSkip = new HashSet(64);
toSkip.addAll(attempted);
toSkip.addAll(peers);
msg.setDontIncludePeers(toSkip);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peers we don't want to hear about: " + toSkip);
return msg;
DatabaseLookupMessage msg = new DatabaseLookupMessage(_context);
msg.setSearchKey(getState().getTarget());
msg.setFrom(replyGateway);
msg.setDontIncludePeers(getState().getAttempted());
msg.setMessageExpiration(new Date(expiration));
msg.setReplyTunnel(replyTunnelId);
Set attempted = getState().getAttempted();
List peers = _peerSelector.selectNearestExplicit(getState().getTarget(), NUM_CLOSEST_TO_IGNORE, attempted, getFacade().getKBuckets());
Set toSkip = new HashSet(64);
toSkip.addAll(attempted);
toSkip.addAll(peers);
msg.setDontIncludePeers(toSkip);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peers we don't want to hear about: " + toSkip);
return msg;
}
/**
* We're looking for a router, so lets build the lookup message (no need to tunnel route either, so just have
@ -89,13 +93,13 @@ class ExploreJob extends SearchJob {
*
*/
protected DatabaseLookupMessage buildMessage(long expiration) {
return buildMessage(null, Router.getInstance().getRouterInfo(), expiration);
return buildMessage(null, _context.router().getRouterInfo(), expiration);
}
/*
* We could override searchNext to see if we actually fill up a kbucket before
* the search expires, but, c'mon, the keyspace is just too bloody massive, and
* the search expires, but, c'mon, the keyspace is just too bloody massive, and
* buckets wont be filling anytime soon, so might as well just use the SearchJob's
* searchNext
*

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -15,65 +15,67 @@ import java.util.Set;
import net.i2p.data.Hash;
import net.i2p.router.JobImpl;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Go through the kbuckets and generate random keys for routers in buckets not
* Go through the kbuckets and generate random keys for routers in buckets not
* yet full, attempting to keep a pool of keys we can explore with (at least one
* per bucket)
*
*/
class ExploreKeySelectorJob extends JobImpl {
private final static Log _log = new Log(ExploreKeySelectorJob.class);
private Log _log;
private KademliaNetworkDatabaseFacade _facade;
private final static long RERUN_DELAY_MS = 60*1000;
public ExploreKeySelectorJob(KademliaNetworkDatabaseFacade facade) {
super();
_facade = facade;
public ExploreKeySelectorJob(RouterContext context, KademliaNetworkDatabaseFacade facade) {
super(context);
_log = context.logManager().getLog(ExploreKeySelectorJob.class);
_facade = facade;
}
public String getName() { return "Explore Key Selector Job"; }
public void runJob() {
Set toExplore = selectKeysToExplore();
_log.info("Filling the explorer pool with: " + toExplore);
if (toExplore != null)
_facade.queueForExploration(toExplore);
requeue(RERUN_DELAY_MS);
public void runJob() {
Set toExplore = selectKeysToExplore();
_log.info("Filling the explorer pool with: " + toExplore);
if (toExplore != null)
_facade.queueForExploration(toExplore);
requeue(RERUN_DELAY_MS);
}
/**
* Run through all kbuckets with too few routers and generate a random key
* for it, with a maximum number of keys limited by the exploration pool size
*
*/
private Set selectKeysToExplore() {
Set alreadyQueued = _facade.getExploreKeys();
if (alreadyQueued.size() > KBucketSet.NUM_BUCKETS) return null;
Set toExplore = new HashSet(KBucketSet.NUM_BUCKETS - alreadyQueued.size());
for (int i = 0; i < KBucketSet.NUM_BUCKETS; i++) {
KBucket bucket = _facade.getKBuckets().getBucket(i);
if (bucket.getKeyCount() < KBucketSet.BUCKET_SIZE) {
boolean already = false;
for (Iterator iter = alreadyQueued.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
if (bucket.shouldContain(key)) {
already = true;
_log.debug("Bucket " + i + " is already queued for exploration \t" + key);
break;
}
}
if (!already) {
// no keys are queued for exploring this still-too-small bucket yet
Hash key = bucket.generateRandomKey();
_log.debug("Bucket " + i + " is NOT queued for exploration, and it only has " + bucket.getKeyCount() + " keys, so explore with \t" + key);
toExplore.add(key);
}
} else {
_log.debug("Bucket " + i + " already has enough keys (" + bucket.getKeyCount() + "), no need to explore further");
}
}
return toExplore;
Set alreadyQueued = _facade.getExploreKeys();
if (alreadyQueued.size() > KBucketSet.NUM_BUCKETS) return null;
Set toExplore = new HashSet(KBucketSet.NUM_BUCKETS - alreadyQueued.size());
for (int i = 0; i < KBucketSet.NUM_BUCKETS; i++) {
KBucket bucket = _facade.getKBuckets().getBucket(i);
if (bucket.getKeyCount() < KBucketSet.BUCKET_SIZE) {
boolean already = false;
for (Iterator iter = alreadyQueued.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
if (bucket.shouldContain(key)) {
already = true;
_log.debug("Bucket " + i + " is already queued for exploration \t" + key);
break;
}
}
if (!already) {
// no keys are queued for exploring this still-too-small bucket yet
Hash key = bucket.generateRandomKey();
_log.debug("Bucket " + i + " is NOT queued for exploration, and it only has " + bucket.getKeyCount() + " keys, so explore with \t" + key);
toExplore.add(key);
}
} else {
_log.debug("Bucket " + i + " already has enough keys (" + bucket.getKeyCount() + "), no need to explore further");
}
}
return toExplore;
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -16,9 +16,10 @@ import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.I2PAppContext;
class KBucketImpl implements KBucket {
private final static Log _log = new Log(KBucketImpl.class);
private Log _log;
private Set _entries; // PeerInfo structures
private Hash _local;
private int _begin; // if any bits equal or higher to this bit (in big endian order),
@ -26,96 +27,99 @@ class KBucketImpl implements KBucket {
private BigInteger _lowerBounds; // lowest distance allowed from local
private BigInteger _upperBounds; // one higher than the highest distance allowed from local
private int _size; // integer value of the number of bits that can fit between lower and upper bounds
private I2PAppContext _context;
public KBucketImpl(Hash local) {
_entries = new HashSet();
_local = local;
public KBucketImpl(I2PAppContext context, Hash local) {
_context = context;
_log = context.logManager().getLog(KBucketImpl.class);
_entries = new HashSet();
_local = local;
}
public int getRangeBegin() { return _begin; }
public int getRangeEnd() { return _end; }
public void setRange(int lowOrderBitLimit, int highOrderBitLimit) {
_begin = lowOrderBitLimit;
_end = highOrderBitLimit;
if (_begin == 0)
_lowerBounds = BigInteger.ZERO;
else
_lowerBounds = BigInteger.ZERO.setBit(_begin);
_upperBounds = BigInteger.ZERO.setBit(_end);
BigInteger diff = _upperBounds.subtract(_lowerBounds);
_size = diff.bitLength();
StringBuffer buf = new StringBuffer(1024);
buf.append("Set range: ").append(lowOrderBitLimit).append(" through ").append(highOrderBitLimit).append('\n');
buf.append("Local key, lowest allowed key, and highest allowed key: \n");
Hash low = getRangeBeginKey();
Hash high = getRangeEndKey();
if ( (_local == null) || (_local.getData() == null) )
buf.append(toString(Hash.FAKE_HASH.getData())).append('\n');
else
buf.append(toString(_local.getData())).append('\n');
buf.append(toString(low.getData())).append('\n');
buf.append(toString(high.getData()));
//_log.debug(buf.toString());
public void setRange(int lowOrderBitLimit, int highOrderBitLimit) {
_begin = lowOrderBitLimit;
_end = highOrderBitLimit;
if (_begin == 0)
_lowerBounds = BigInteger.ZERO;
else
_lowerBounds = BigInteger.ZERO.setBit(_begin);
_upperBounds = BigInteger.ZERO.setBit(_end);
BigInteger diff = _upperBounds.subtract(_lowerBounds);
_size = diff.bitLength();
StringBuffer buf = new StringBuffer(1024);
buf.append("Set range: ").append(lowOrderBitLimit).append(" through ").append(highOrderBitLimit).append('\n');
buf.append("Local key, lowest allowed key, and highest allowed key: \n");
Hash low = getRangeBeginKey();
Hash high = getRangeEndKey();
if ( (_local == null) || (_local.getData() == null) )
buf.append(toString(Hash.FAKE_HASH.getData())).append('\n');
else
buf.append(toString(_local.getData())).append('\n');
buf.append(toString(low.getData())).append('\n');
buf.append(toString(high.getData()));
//_log.debug(buf.toString());
}
public int getKeyCount() {
synchronized (_entries) {
return _entries.size();
}
public int getKeyCount() {
synchronized (_entries) {
return _entries.size();
}
}
public Hash getLocal() { return _local; }
public void setLocal(Hash local) { _local = local; }
private byte[] distanceFromLocal(Hash key) {
return DataHelper.xor(key.getData(), _local.getData());
return DataHelper.xor(key.getData(), _local.getData());
}
public boolean shouldContain(Hash key) {
// woohah, incredibly excessive object creation! whee!
BigInteger kv = new BigInteger(1, distanceFromLocal(key));
int lowComp = kv.compareTo(_lowerBounds);
int highComp = kv.compareTo(_upperBounds);
//_log.debug("kv.compareTo(low) = " + lowComp + " kv.compareTo(high) " + highComp);
if ( (lowComp >= 0) && (highComp < 0) ) return true;
return false;
// woohah, incredibly excessive object creation! whee!
BigInteger kv = new BigInteger(1, distanceFromLocal(key));
int lowComp = kv.compareTo(_lowerBounds);
int highComp = kv.compareTo(_upperBounds);
//_log.debug("kv.compareTo(low) = " + lowComp + " kv.compareTo(high) " + highComp);
if ( (lowComp >= 0) && (highComp < 0) ) return true;
return false;
}
public Set getEntries() {
Set entries = new HashSet(64);
synchronized (_entries) {
entries.addAll(_entries);
}
return entries;
Set entries = new HashSet(64);
synchronized (_entries) {
entries.addAll(_entries);
}
return entries;
}
public Set getEntries(Set toIgnoreHashes) {
Set entries = new HashSet(64);
synchronized (_entries) {
entries.addAll(_entries);
entries.removeAll(toIgnoreHashes);
}
return entries;
Set entries = new HashSet(64);
synchronized (_entries) {
entries.addAll(_entries);
entries.removeAll(toIgnoreHashes);
}
return entries;
}
public void setEntries(Set entries) {
synchronized (_entries) {
_entries.clear();
_entries.addAll(entries);
}
synchronized (_entries) {
_entries.clear();
_entries.addAll(entries);
}
}
public int add(Hash peer) {
synchronized (_entries) {
_entries.add(peer);
return _entries.size();
}
synchronized (_entries) {
_entries.add(peer);
return _entries.size();
}
}
public boolean remove(Hash peer) {
synchronized (_entries) {
return _entries.remove(peer);
}
synchronized (_entries) {
return _entries.remove(peer);
}
}
/**
@ -123,164 +127,166 @@ class KBucketImpl implements KBucket {
*
*/
public Hash generateRandomKey() {
BigInteger variance = new BigInteger(_size-1, RandomSource.getInstance());
variance = variance.add(_lowerBounds);
//_log.debug("Random variance for " + _size + " bits: " + variance);
byte data[] = variance.toByteArray();
byte hash[] = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
Hash key = new Hash(hash);
data = distanceFromLocal(key);
hash = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
key = new Hash(hash);
return key;
BigInteger variance = new BigInteger(_size-1, _context.random());
variance = variance.add(_lowerBounds);
//_log.debug("Random variance for " + _size + " bits: " + variance);
byte data[] = variance.toByteArray();
byte hash[] = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
Hash key = new Hash(hash);
data = distanceFromLocal(key);
hash = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
key = new Hash(hash);
return key;
}
public Hash getRangeBeginKey() {
BigInteger lowerBounds = _lowerBounds;
if ( (_local != null) && (_local.getData() != null) ) {
lowerBounds = lowerBounds.xor(new BigInteger(1, _local.getData()));
}
byte data[] = lowerBounds.toByteArray();
byte hash[] = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
Hash key = new Hash(hash);
return key;
BigInteger lowerBounds = _lowerBounds;
if ( (_local != null) && (_local.getData() != null) ) {
lowerBounds = lowerBounds.xor(new BigInteger(1, _local.getData()));
}
byte data[] = lowerBounds.toByteArray();
byte hash[] = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
Hash key = new Hash(hash);
return key;
}
public Hash getRangeEndKey() {
BigInteger upperBounds = _upperBounds;
if ( (_local != null) && (_local.getData() != null) ) {
upperBounds = upperBounds.xor(new BigInteger(1, _local.getData()));
}
byte data[] = upperBounds.toByteArray();
byte hash[] = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
Hash key = new Hash(hash);
return key;
BigInteger upperBounds = _upperBounds;
if ( (_local != null) && (_local.getData() != null) ) {
upperBounds = upperBounds.xor(new BigInteger(1, _local.getData()));
}
byte data[] = upperBounds.toByteArray();
byte hash[] = new byte[Hash.HASH_LENGTH];
if (data.length <= Hash.HASH_LENGTH) {
System.arraycopy(data, 0, hash, hash.length - data.length, data.length);
} else {
System.arraycopy(data, data.length - hash.length, hash, 0, hash.length);
}
Hash key = new Hash(hash);
return key;
}
public String toString() {
StringBuffer buf = new StringBuffer(1024);
buf.append("KBucketImpl: ");
synchronized (_entries) {
buf.append(_entries.toString()).append("\n");
}
buf.append("Low bit: ").append(_begin).append(" high bit: ").append(_end).append('\n');
buf.append("Local key: \n");
if ( (_local != null) && (_local.getData() != null) )
buf.append(toString(_local.getData())).append('\n');
else
buf.append("[undefined]\n");
buf.append("Low and high keys:\n");
buf.append(toString(getRangeBeginKey().getData())).append('\n');
buf.append(toString(getRangeEndKey().getData())).append('\n');
buf.append("Low and high deltas:\n");
buf.append(_lowerBounds.toString(2)).append('\n');
buf.append(_upperBounds.toString(2)).append('\n');
return buf.toString();
StringBuffer buf = new StringBuffer(1024);
buf.append("KBucketImpl: ");
synchronized (_entries) {
buf.append(_entries.toString()).append("\n");
}
buf.append("Low bit: ").append(_begin).append(" high bit: ").append(_end).append('\n');
buf.append("Local key: \n");
if ( (_local != null) && (_local.getData() != null) )
buf.append(toString(_local.getData())).append('\n');
else
buf.append("[undefined]\n");
buf.append("Low and high keys:\n");
buf.append(toString(getRangeBeginKey().getData())).append('\n');
buf.append(toString(getRangeEndKey().getData())).append('\n');
buf.append("Low and high deltas:\n");
buf.append(_lowerBounds.toString(2)).append('\n');
buf.append(_upperBounds.toString(2)).append('\n');
return buf.toString();
}
/**
* Test harness to make sure its assigning keys to the right buckets
*
*/
*/
public static void main(String args[]) {
testRand2();
testRand();
try { Thread.sleep(10000); } catch (InterruptedException ie) {}
testRand2();
testRand();
try { Thread.sleep(10000); } catch (InterruptedException ie) {}
}
private static void testRand() {
StringBuffer buf = new StringBuffer(2048);
int low = 1;
int high = 3;
KBucketImpl bucket = new KBucketImpl(Hash.FAKE_HASH);
bucket.setRange(low, high);
Hash lowerBoundKey = bucket.getRangeBeginKey();
Hash upperBoundKey = bucket.getRangeEndKey();
for (int i = 0; i < 100; i++) {
Hash rnd = bucket.generateRandomKey();
//buf.append(toString(rnd.getData())).append('\n');
boolean ok = bucket.shouldContain(rnd);
if (!ok) {
byte diff[] = DataHelper.xor(rnd.getData(), bucket.getLocal().getData());
BigInteger dv = new BigInteger(1, diff);
_log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData()) + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2) + "\nBucket: \n"+bucket, new Exception("WTF"));
try { Thread.sleep(1000); } catch (Exception e) {}
System.exit(0);
} else {
//_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
}
//_log.info("Low/High:\n" + toString(lowBounds.toByteArray()) + "\n" + toString(highBounds.toByteArray()));
}
_log.info("Passed 100 random key generations against the null hash");
StringBuffer buf = new StringBuffer(2048);
int low = 1;
int high = 3;
Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), Hash.FAKE_HASH);
bucket.setRange(low, high);
Hash lowerBoundKey = bucket.getRangeBeginKey();
Hash upperBoundKey = bucket.getRangeEndKey();
for (int i = 0; i < 100; i++) {
Hash rnd = bucket.generateRandomKey();
//buf.append(toString(rnd.getData())).append('\n');
boolean ok = bucket.shouldContain(rnd);
if (!ok) {
byte diff[] = DataHelper.xor(rnd.getData(), bucket.getLocal().getData());
BigInteger dv = new BigInteger(1, diff);
log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData()) + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2) + "\nBucket: \n"+bucket, new Exception("WTF"));
try { Thread.sleep(1000); } catch (Exception e) {}
System.exit(0);
} else {
//_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
}
//_log.info("Low/High:\n" + toString(lowBounds.toByteArray()) + "\n" + toString(highBounds.toByteArray()));
}
log.info("Passed 100 random key generations against the null hash");
}
private static void testRand2() {
StringBuffer buf = new StringBuffer(1024*1024*16);
int low = 1;
int high = 200;
byte hash[] = new byte[Hash.HASH_LENGTH];
RandomSource.getInstance().nextBytes(hash);
KBucketImpl bucket = new KBucketImpl(new Hash(hash));
bucket.setRange(low, high);
Hash lowerBoundKey = bucket.getRangeBeginKey();
Hash upperBoundKey = bucket.getRangeEndKey();
for (int i = 0; i < 1000; i++) {
Hash rnd = bucket.generateRandomKey();
buf.append(toString(rnd.getData())).append('\n');
boolean ok = bucket.shouldContain(rnd);
if (!ok) {
byte diff[] = DataHelper.xor(rnd.getData(), bucket.getLocal().getData());
BigInteger dv = new BigInteger(1, diff);
_log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData()) + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2) + "\nBucket: \n"+bucket, new Exception("WTF"));
try { Thread.sleep(1000); } catch (Exception e) {}
System.exit(0);
} else {
//_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
}
}
_log.info("Passed 1000 random key generations against a random hash\n" + buf.toString());
Log log = I2PAppContext.getGlobalContext().logManager().getLog(KBucketImpl.class);
StringBuffer buf = new StringBuffer(1024*1024*16);
int low = 1;
int high = 200;
byte hash[] = new byte[Hash.HASH_LENGTH];
RandomSource.getInstance().nextBytes(hash);
KBucketImpl bucket = new KBucketImpl(I2PAppContext.getGlobalContext(), new Hash(hash));
bucket.setRange(low, high);
Hash lowerBoundKey = bucket.getRangeBeginKey();
Hash upperBoundKey = bucket.getRangeEndKey();
for (int i = 0; i < 1000; i++) {
Hash rnd = bucket.generateRandomKey();
buf.append(toString(rnd.getData())).append('\n');
boolean ok = bucket.shouldContain(rnd);
if (!ok) {
byte diff[] = DataHelper.xor(rnd.getData(), bucket.getLocal().getData());
BigInteger dv = new BigInteger(1, diff);
log.error("WTF! bucket doesn't want: \n" + toString(rnd.getData()) + "\nDelta: \n" + toString(diff) + "\nDelta val: \n" + dv.toString(2) + "\nBucket: \n"+bucket, new Exception("WTF"));
try { Thread.sleep(1000); } catch (Exception e) {}
System.exit(0);
} else {
//_log.debug("Ok, bucket wants: \n" + toString(rnd.getData()));
}
}
log.info("Passed 1000 random key generations against a random hash\n" + buf.toString());
}
private final static String toString(byte b[]) {
StringBuffer buf = new StringBuffer(b.length);
for (int i = 0; i < b.length; i++) {
buf.append(toString(b[i]));
buf.append(" ");
}
return buf.toString();
StringBuffer buf = new StringBuffer(b.length);
for (int i = 0; i < b.length; i++) {
buf.append(toString(b[i]));
buf.append(" ");
}
return buf.toString();
}
private final static String toString(byte b) {
StringBuffer buf = new StringBuffer(8);
for (int i = 7; i >= 0; i--) {
boolean bb = (0 != (b & (1<<i)));
if (bb)
buf.append("1");
else
buf.append("0");
}
return buf.toString();
StringBuffer buf = new StringBuffer(8);
for (int i = 7; i >= 0; i--) {
boolean bb = (0 != (b & (1<<i)));
if (bb)
buf.append("1");
else
buf.append("0");
}
return buf.toString();
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -15,6 +15,7 @@ import java.util.Set;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.util.Log;
import net.i2p.I2PAppContext;
/**
* In memory storage of buckets sorted by the XOR metric from the local router's
@ -23,7 +24,8 @@ import net.i2p.util.Log;
*
*/
class KBucketSet {
private final static Log _log = new Log(KBucketSet.class);
private Log _log;
private I2PAppContext _context;
private Hash _us;
private KBucket _buckets[];
@ -33,117 +35,119 @@ class KBucketSet {
private final static BigInteger BASE_I = new BigInteger(""+(1<<BASE));
public final static int BUCKET_SIZE = 500; // # values at which we start periodic trimming (500 ~= 250Kb)
public KBucketSet(Hash us) {
_us = us;
createBuckets();
public KBucketSet(I2PAppContext context, Hash us) {
_us = us;
_context = context;
_log = context.logManager().getLog(KBucketSet.class);
createBuckets();
}
/**
* Return true if the peer is new to the bucket it goes in, or false if it was
* Return true if the peer is new to the bucket it goes in, or false if it was
* already in it
*/
public boolean add(Hash peer) {
int bucket = pickBucket(peer);
if (bucket >= 0) {
int oldSize = _buckets[bucket].getKeyCount();
int numInBucket = _buckets[bucket].add(peer);
if (numInBucket > BUCKET_SIZE) {
// perhaps queue up coallesce job? naaahh.. lets let 'er grow for now
}
_log.debug("Peer " + peer + " added to bucket " + bucket);
return oldSize != numInBucket;
} else {
throw new IllegalArgumentException("Unable to pick a bucket. wtf!");
}
int bucket = pickBucket(peer);
if (bucket >= 0) {
int oldSize = _buckets[bucket].getKeyCount();
int numInBucket = _buckets[bucket].add(peer);
if (numInBucket > BUCKET_SIZE) {
// perhaps queue up coallesce job? naaahh.. lets let 'er grow for now
}
_log.debug("Peer " + peer + " added to bucket " + bucket);
return oldSize != numInBucket;
} else {
throw new IllegalArgumentException("Unable to pick a bucket. wtf!");
}
}
public int size() {
int size = 0;
for (int i = 0; i < _buckets.length; i++)
size += _buckets[i].getKeyCount();
return size;
public int size() {
int size = 0;
for (int i = 0; i < _buckets.length; i++)
size += _buckets[i].getKeyCount();
return size;
}
public boolean remove(Hash entry) {
int bucket = pickBucket(entry);
KBucket kbucket = getBucket(bucket);
boolean removed = kbucket.remove(entry);
return removed;
int bucket = pickBucket(entry);
KBucket kbucket = getBucket(bucket);
boolean removed = kbucket.remove(entry);
return removed;
}
public Set getAll() { return getAll(new HashSet()); }
public Set getAll(Set toIgnore) {
HashSet all = new HashSet(1024);
for (int i = 0; i < _buckets.length; i++) {
all.addAll(_buckets[i].getEntries(toIgnore));
}
return all;
HashSet all = new HashSet(1024);
for (int i = 0; i < _buckets.length; i++) {
all.addAll(_buckets[i].getEntries(toIgnore));
}
return all;
}
public int pickBucket(Hash key) {
for (int i = 0; i < NUM_BUCKETS; i++) {
if (_buckets[i].shouldContain(key))
return i;
}
_log.error("Key does not fit in any bucket?! WTF!\nKey : [" + toString(key.getData()) + "]\nDelta: ["+ toString(DataHelper.xor(_us.getData(), key.getData())) + "]\nUs : [" + toString(_us.getData()) + "]", new Exception("WTF"));
displayBuckets();
return -1;
for (int i = 0; i < NUM_BUCKETS; i++) {
if (_buckets[i].shouldContain(key))
return i;
}
_log.error("Key does not fit in any bucket?! WTF!\nKey : [" + toString(key.getData()) + "]\nDelta: ["+ toString(DataHelper.xor(_us.getData(), key.getData())) + "]\nUs : [" + toString(_us.getData()) + "]", new Exception("WTF"));
displayBuckets();
return -1;
}
public KBucket getBucket(int bucket) { return _buckets[bucket]; }
protected void createBuckets() {
_buckets = new KBucket[NUM_BUCKETS];
for (int i = 0; i < NUM_BUCKETS-1; i++) {
_buckets[i] = createBucket(i*BASE, (i+1)*BASE);
}
_buckets[NUM_BUCKETS-1] = createBucket(BASE*(NUM_BUCKETS-1), BASE*(NUM_BUCKETS) + 1);
_buckets = new KBucket[NUM_BUCKETS];
for (int i = 0; i < NUM_BUCKETS-1; i++) {
_buckets[i] = createBucket(i*BASE, (i+1)*BASE);
}
_buckets[NUM_BUCKETS-1] = createBucket(BASE*(NUM_BUCKETS-1), BASE*(NUM_BUCKETS) + 1);
}
protected KBucket createBucket(int start, int end) {
KBucket bucket = new KBucketImpl(_us);
bucket.setRange(start, end);
_log.debug("Creating a bucket from " + start + " to " + (end));
return bucket;
KBucket bucket = new KBucketImpl(_context, _us);
bucket.setRange(start, end);
_log.debug("Creating a bucket from " + start + " to " + (end));
return bucket;
}
public void displayBuckets() {
_log.info(toString());
_log.info(toString());
}
public String toString() {
BigInteger us = new BigInteger(1, _us.getData());
StringBuffer buf = new StringBuffer(1024);
buf.append("Bucket set rooted on: ").append(us.toString()).append(" (aka ").append(us.toString(2)).append("): \n");
for (int i = 0; i < NUM_BUCKETS; i++) {
buf.append("* Bucket ").append(i).append("/").append(NUM_BUCKETS-1).append(": )\n");
buf.append("Start: ").append("2^").append(_buckets[i].getRangeBegin()).append(")\n");
buf.append("End: ").append("2^").append(_buckets[i].getRangeEnd()).append(")\n");
buf.append("Contents:").append(_buckets[i].toString()).append("\n");
}
return buf.toString();
BigInteger us = new BigInteger(1, _us.getData());
StringBuffer buf = new StringBuffer(1024);
buf.append("Bucket set rooted on: ").append(us.toString()).append(" (aka ").append(us.toString(2)).append("): \n");
for (int i = 0; i < NUM_BUCKETS; i++) {
buf.append("* Bucket ").append(i).append("/").append(NUM_BUCKETS-1).append(": )\n");
buf.append("Start: ").append("2^").append(_buckets[i].getRangeBegin()).append(")\n");
buf.append("End: ").append("2^").append(_buckets[i].getRangeEnd()).append(")\n");
buf.append("Contents:").append(_buckets[i].toString()).append("\n");
}
return buf.toString();
}
final static String toString(byte b[]) {
byte val[] = new byte[Hash.HASH_LENGTH];
if (b.length < 32)
System.arraycopy(b, 0, val, Hash.HASH_LENGTH-b.length-1, b.length);
else
System.arraycopy(b, Hash.HASH_LENGTH-b.length, val, 0, val.length);
StringBuffer buf = new StringBuffer(KEYSIZE_BITS);
for (int i = 0; i < val.length; i++) {
for (int j = 7; j >= 0; j--) {
boolean bb = (0 != (val[i] & (1<<j)));
if (bb)
buf.append("1");
else
buf.append("0");
}
buf.append(" ");
}
// buf.append(Integer.toBinaryString(val[i]));
return buf.toString();
byte val[] = new byte[Hash.HASH_LENGTH];
if (b.length < 32)
System.arraycopy(b, 0, val, Hash.HASH_LENGTH-b.length-1, b.length);
else
System.arraycopy(b, Hash.HASH_LENGTH-b.length, val, 0, val.length);
StringBuffer buf = new StringBuffer(KEYSIZE_BITS);
for (int i = 0; i < val.length; i++) {
for (int j = 7; j >= 0; j--) {
boolean bb = (0 != (val[i] & (1<<j)));
if (bb)
buf.append("1");
else
buf.append("0");
}
buf.append(" ");
}
// buf.append(Integer.toBinaryString(val[i]));
return buf.toString();
}
}

View File

@ -23,11 +23,16 @@ import net.i2p.data.Hash;
import net.i2p.router.ProfileManager;
import net.i2p.router.Router;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
class PeerSelector {
private final static Log _log = new Log(PeerSelector.class);
private static final PeerSelector _instance = new PeerSelector();
public static final PeerSelector getInstance() { return _instance; }
private Log _log;
private RouterContext _context;
public PeerSelector(RouterContext ctx) {
_context = ctx;
_log = _context.logManager().getLog(PeerSelector.class);
}
/**
* Search through the kbucket set to find the most reliable peers close to the
@ -36,9 +41,9 @@ class PeerSelector {
* @return ordered list of Hash objects
*/
public List selectMostReliablePeers(Hash key, int numClosest, Set alreadyChecked, KBucketSet kbuckets) {
// get the peers closest to the key
List nearest = selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets);
return nearest;
// get the peers closest to the key
List nearest = selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets);
return nearest;
}
/**
@ -49,26 +54,29 @@ class PeerSelector {
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
*/
public List selectNearestExplicit(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {
if (peersToIgnore == null)
peersToIgnore = new HashSet(1);
peersToIgnore.add(Router.getInstance().getRouterInfo().getIdentity().getHash());
Set allHashes = kbuckets.getAll(peersToIgnore);
removeFailingPeers(allHashes);
Map diffMap = new HashMap(allHashes.size());
for (Iterator iter = allHashes.iterator(); iter.hasNext(); ) {
Hash cur = (Hash)iter.next();
BigInteger diff = getDistance(key, cur);
diffMap.put(diff, cur);
}
// n*log(n)
Map sortedMap = new TreeMap(diffMap);
List peerHashes = new ArrayList(maxNumRouters);
for (Iterator iter = sortedMap.values().iterator(); iter.hasNext(); ) {
if (peerHashes.size() >= maxNumRouters) break;
peerHashes.add(iter.next());
}
_log.debug("Searching for " + maxNumRouters + " peers close to " + key + ": " + peerHashes + " (not including " + peersToIgnore + ") [allHashes.size = " + allHashes.size() + "]");
return peerHashes;
if (peersToIgnore == null)
peersToIgnore = new HashSet(1);
peersToIgnore.add(_context.router().getRouterInfo().getIdentity().getHash());
Set allHashes = kbuckets.getAll(peersToIgnore);
removeFailingPeers(allHashes);
Map diffMap = new HashMap(allHashes.size());
for (Iterator iter = allHashes.iterator(); iter.hasNext(); ) {
Hash cur = (Hash)iter.next();
BigInteger diff = getDistance(key, cur);
diffMap.put(diff, cur);
}
// n*log(n)
Map sortedMap = new TreeMap(diffMap);
List peerHashes = new ArrayList(maxNumRouters);
for (Iterator iter = sortedMap.values().iterator(); iter.hasNext(); ) {
if (peerHashes.size() >= maxNumRouters) break;
peerHashes.add(iter.next());
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Searching for " + maxNumRouters + " peers close to " + key + ": "
+ peerHashes + " (not including " + peersToIgnore + ") [allHashes.size = "
+ allHashes.size() + "]");
return peerHashes;
}
/**
@ -76,22 +84,22 @@ class PeerSelector {
*
*/
private void removeFailingPeers(Set peerHashes) {
List failing = new ArrayList(16);
for (Iterator iter = peerHashes.iterator(); iter.hasNext(); ) {
Hash cur = (Hash)iter.next();
if (ProfileManager.getInstance().isFailing(cur)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer " + cur.toBase64() + " is failing, don't include them in the peer selection");
failing.add(cur);
}
}
peerHashes.removeAll(failing);
List failing = new ArrayList(16);
for (Iterator iter = peerHashes.iterator(); iter.hasNext(); ) {
Hash cur = (Hash)iter.next();
if (_context.profileOrganizer().isFailing(cur)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer " + cur.toBase64() + " is failing, don't include them in the peer selection");
failing.add(cur);
}
}
peerHashes.removeAll(failing);
}
protected BigInteger getDistance(Hash targetKey, Hash routerInQuestion) {
// plain XOR of the key and router
byte diff[] = DataHelper.xor(routerInQuestion.getData(), targetKey.getData());
return new BigInteger(1, diff);
// plain XOR of the key and router
byte diff[] = DataHelper.xor(routerInQuestion.getData(), targetKey.getData());
return new BigInteger(1, diff);
}
/**
@ -102,10 +110,10 @@ class PeerSelector {
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
*/
public List selectNearest(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {
// sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia
// would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm.
// later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance()
// into a simple bucket selection algo + random select rather than an n*log(n) op)
return selectNearestExplicit(key, maxNumRouters, peersToIgnore, kbuckets);
// sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia
// would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm.
// later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance()
// into a simple bucket selection algo + random select rather than an n*log(n) op)
return selectNearestExplicit(key, maxNumRouters, peersToIgnore, kbuckets);
}
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -24,226 +24,231 @@ import net.i2p.router.JobImpl;
import net.i2p.router.JobQueue;
import net.i2p.router.Router;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Write out keys to disk when we get them and periodically read ones we don't know
* about into memory, with newly read routers are also added to the routing table.
*
*
*/
class PersistentDataStore extends TransientDataStore {
private final static Log _log = new Log(PersistentDataStore.class);
private Log _log;
private String _dbDir;
private KademliaNetworkDatabaseFacade _facade;
private final static int READ_DELAY = 60*1000;
public PersistentDataStore(String dbDir, KademliaNetworkDatabaseFacade facade) {
super();
_dbDir = dbDir;
_facade = facade;
JobQueue.getInstance().addJob(new ReadJob());
public PersistentDataStore(RouterContext ctx, String dbDir, KademliaNetworkDatabaseFacade facade) {
super(ctx);
_log = ctx.logManager().getLog(PersistentDataStore.class);
_dbDir = dbDir;
_facade = facade;
_context.jobQueue().addJob(new ReadJob());
}
public DataStructure remove(Hash key) {
JobQueue.getInstance().addJob(new RemoveJob(key));
return super.remove(key);
_context.jobQueue().addJob(new RemoveJob(key));
return super.remove(key);
}
public void put(Hash key, DataStructure data) {
if ( (data == null) || (key == null) ) return;
super.put(key, data);
JobQueue.getInstance().addJob(new WriteJob(key, data));
if ( (data == null) || (key == null) ) return;
super.put(key, data);
_context.jobQueue().addJob(new WriteJob(key, data));
}
private void accept(LeaseSet ls) {
super.put(ls.getDestination().calculateHash(), ls);
super.put(ls.getDestination().calculateHash(), ls);
}
private void accept(RouterInfo ri) {
Hash key = ri.getIdentity().getHash();
super.put(key, ri);
// add recently loaded routers to the routing table
_facade.getKBuckets().add(key);
Hash key = ri.getIdentity().getHash();
super.put(key, ri);
// add recently loaded routers to the routing table
_facade.getKBuckets().add(key);
}
private class RemoveJob extends JobImpl {
private Hash _key;
public RemoveJob(Hash key) {
_key = key;
}
public String getName() { return "Remove Key"; }
public void runJob() {
_log.info("Removing key " + _key, getAddedBy());
try {
File dbDir = getDbDir();
removeFile(_key, dbDir);
} catch (IOException ioe) {
_log.error("Error removing key " + _key, ioe);
}
}
private Hash _key;
public RemoveJob(Hash key) {
super(PersistentDataStore.this._context);
_key = key;
}
public String getName() { return "Remove Key"; }
public void runJob() {
_log.info("Removing key " + _key, getAddedBy());
try {
File dbDir = getDbDir();
removeFile(_key, dbDir);
} catch (IOException ioe) {
_log.error("Error removing key " + _key, ioe);
}
}
}
private class WriteJob extends JobImpl {
private Hash _key;
private DataStructure _data;
public WriteJob(Hash key, DataStructure data) {
super();
_key = key;
_data = data;
}
public String getName() { return "DB Writer Job"; }
public void runJob() {
_log.info("Writing key " + _key);
FileOutputStream fos = null;
try {
String filename = null;
File dbDir = getDbDir();
if (_data instanceof LeaseSet)
filename = getLeaseSetName(_key);
else if (_data instanceof RouterInfo)
filename = getRouterInfoName(_key);
else
throw new IOException("We don't know how to write objects of type " + _data.getClass().getName());
fos = new FileOutputStream(new File(dbDir, filename));
try {
_data.writeBytes(fos);
} catch (DataFormatException dfe) {
_log.error("Error writing out malformed object as " + _key + ": " + _data, dfe);
File f = new File(dbDir, filename);
f.delete();
}
} catch (IOException ioe) {
_log.error("Error writing out the object", ioe);
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
private Hash _key;
private DataStructure _data;
public WriteJob(Hash key, DataStructure data) {
super(PersistentDataStore.this._context);
_key = key;
_data = data;
}
public String getName() { return "DB Writer Job"; }
public void runJob() {
_log.info("Writing key " + _key);
FileOutputStream fos = null;
try {
String filename = null;
File dbDir = getDbDir();
if (_data instanceof LeaseSet)
filename = getLeaseSetName(_key);
else if (_data instanceof RouterInfo)
filename = getRouterInfoName(_key);
else
throw new IOException("We don't know how to write objects of type " + _data.getClass().getName());
fos = new FileOutputStream(new File(dbDir, filename));
try {
_data.writeBytes(fos);
} catch (DataFormatException dfe) {
_log.error("Error writing out malformed object as " + _key + ": " + _data, dfe);
File f = new File(dbDir, filename);
f.delete();
}
} catch (IOException ioe) {
_log.error("Error writing out the object", ioe);
} finally {
if (fos != null) try { fos.close(); } catch (IOException ioe) {}
}
}
}
private class ReadJob extends JobImpl {
public ReadJob() {
super();
}
public String getName() { return "DB Read Job"; }
public void runJob() {
_log.info("Rereading new files");
readFiles();
requeue(READ_DELAY);
}
private void readFiles() {
try {
File dbDir = getDbDir();
File leaseSetFiles[] = dbDir.listFiles(LeaseSetFilter.getInstance());
if (leaseSetFiles != null) {
for (int i = 0; i < leaseSetFiles.length; i++) {
Hash key = getLeaseSetHash(leaseSetFiles[i].getName());
if ( (key != null) && (!isKnown(key)) )
JobQueue.getInstance().addJob(new ReadLeaseJob(leaseSetFiles[i]));
}
}
File routerInfoFiles[] = dbDir.listFiles(RouterInfoFilter.getInstance());
if (routerInfoFiles != null) {
for (int i = 0; i < routerInfoFiles.length; i++) {
Hash key = getRouterInfoHash(routerInfoFiles[i].getName());
if ( (key != null) && (!isKnown(key)) )
JobQueue.getInstance().addJob(new ReadRouterJob(routerInfoFiles[i]));
}
}
} catch (IOException ioe) {
_log.error("Error reading files in the db dir", ioe);
}
}
public ReadJob() {
super(PersistentDataStore.this._context);
}
public String getName() { return "DB Read Job"; }
public void runJob() {
_log.info("Rereading new files");
readFiles();
requeue(READ_DELAY);
}
private void readFiles() {
try {
File dbDir = getDbDir();
File leaseSetFiles[] = dbDir.listFiles(LeaseSetFilter.getInstance());
if (leaseSetFiles != null) {
for (int i = 0; i < leaseSetFiles.length; i++) {
Hash key = getLeaseSetHash(leaseSetFiles[i].getName());
if ( (key != null) && (!isKnown(key)) )
PersistentDataStore.this._context.jobQueue().addJob(new ReadLeaseJob(leaseSetFiles[i]));
}
}
File routerInfoFiles[] = dbDir.listFiles(RouterInfoFilter.getInstance());
if (routerInfoFiles != null) {
for (int i = 0; i < routerInfoFiles.length; i++) {
Hash key = getRouterInfoHash(routerInfoFiles[i].getName());
if ( (key != null) && (!isKnown(key)) )
PersistentDataStore.this._context.jobQueue().addJob(new ReadRouterJob(routerInfoFiles[i]));
}
}
} catch (IOException ioe) {
_log.error("Error reading files in the db dir", ioe);
}
}
}
private class ReadLeaseJob extends JobImpl {
private File _leaseFile;
public ReadLeaseJob(File leaseFile) {
_leaseFile = leaseFile;
}
public String getName() { return "Read LeaseSet"; }
public void runJob() {
try {
FileInputStream fis = null;
boolean corrupt = false;
try {
fis = new FileInputStream(_leaseFile);
LeaseSet ls = new LeaseSet();
ls.readBytes(fis);
if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
_log.info("Reading in new LeaseSet: " + ls.getDestination().calculateHash());
accept(ls);
} else {
_log.warn("Expired LeaseSet found for " + ls.getDestination().calculateHash() + ": Deleting");
corrupt = true;
}
} catch (DataFormatException dfe) {
_log.warn("Error reading the leaseSet from " + _leaseFile.getAbsolutePath(), dfe);
corrupt = true;
} catch (FileNotFoundException fnfe) {
_log.debug("Deleted prior to read.. a race during expiration / load");
corrupt = false;
} finally {
if (fis != null) try { fis.close(); } catch (IOException ioe) {}
}
if (corrupt) _leaseFile.delete();
} catch (IOException ioe) {
_log.warn("Error reading the leaseSet from " + _leaseFile.getAbsolutePath(), ioe);
}
}
private File _leaseFile;
public ReadLeaseJob(File leaseFile) {
super(PersistentDataStore.this._context);
_leaseFile = leaseFile;
}
public String getName() { return "Read LeaseSet"; }
public void runJob() {
try {
FileInputStream fis = null;
boolean corrupt = false;
try {
fis = new FileInputStream(_leaseFile);
LeaseSet ls = new LeaseSet();
ls.readBytes(fis);
if (ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
_log.info("Reading in new LeaseSet: " + ls.getDestination().calculateHash());
accept(ls);
} else {
_log.warn("Expired LeaseSet found for " + ls.getDestination().calculateHash() + ": Deleting");
corrupt = true;
}
} catch (DataFormatException dfe) {
_log.warn("Error reading the leaseSet from " + _leaseFile.getAbsolutePath(), dfe);
corrupt = true;
} catch (FileNotFoundException fnfe) {
_log.debug("Deleted prior to read.. a race during expiration / load");
corrupt = false;
} finally {
if (fis != null) try { fis.close(); } catch (IOException ioe) {}
}
if (corrupt) _leaseFile.delete();
} catch (IOException ioe) {
_log.warn("Error reading the leaseSet from " + _leaseFile.getAbsolutePath(), ioe);
}
}
}
private class ReadRouterJob extends JobImpl {
private File _routerFile;
public ReadRouterJob(File routerFile) {
_routerFile = routerFile;
}
public String getName() { return "Read RouterInfo"; }
public void runJob() {
try {
FileInputStream fis = null;
boolean corrupt = false;
try {
fis = new FileInputStream(_routerFile);
RouterInfo ri = new RouterInfo();
ri.readBytes(fis);
if (ri.isValid()) {
_log.info("Reading in new RouterInfo: " + ri.getIdentity().getHash());
accept(ri);
} else {
_log.warn("Invalid routerInfo found for " + ri.getIdentity().getHash() + ": " + ri);
corrupt = true;
}
} catch (DataFormatException dfe) {
_log.warn("Error reading the routerInfo from " + _routerFile.getAbsolutePath(), dfe);
corrupt = true;
} finally {
if (fis != null) try { fis.close(); } catch (IOException ioe) {}
}
if (corrupt) _routerFile.delete();
} catch (IOException ioe) {
_log.warn("Error reading the RouterInfo from " + _routerFile.getAbsolutePath(), ioe);
}
}
private File _routerFile;
public ReadRouterJob(File routerFile) {
super(PersistentDataStore.this._context);
_routerFile = routerFile;
}
public String getName() { return "Read RouterInfo"; }
public void runJob() {
try {
FileInputStream fis = null;
boolean corrupt = false;
try {
fis = new FileInputStream(_routerFile);
RouterInfo ri = new RouterInfo();
ri.readBytes(fis);
if (ri.isValid()) {
_log.info("Reading in new RouterInfo: " + ri.getIdentity().getHash());
accept(ri);
} else {
_log.warn("Invalid routerInfo found for " + ri.getIdentity().getHash() + ": " + ri);
corrupt = true;
}
} catch (DataFormatException dfe) {
_log.warn("Error reading the routerInfo from " + _routerFile.getAbsolutePath(), dfe);
corrupt = true;
} finally {
if (fis != null) try { fis.close(); } catch (IOException ioe) {}
}
if (corrupt) _routerFile.delete();
} catch (IOException ioe) {
_log.warn("Error reading the RouterInfo from " + _routerFile.getAbsolutePath(), ioe);
}
}
}
private File getDbDir() throws IOException {
File f = new File(_dbDir);
if (!f.exists()) {
boolean created = f.mkdirs();
if (!created)
throw new IOException("Unable to create the DB directory [" + f.getAbsolutePath() + "]");
}
if (!f.isDirectory())
throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not a directory!");
if (!f.canRead())
throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not readable!");
if (!f.canWrite())
throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not writable!");
return f;
File f = new File(_dbDir);
if (!f.exists()) {
boolean created = f.mkdirs();
if (!created)
throw new IOException("Unable to create the DB directory [" + f.getAbsolutePath() + "]");
}
if (!f.isDirectory())
throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not a directory!");
if (!f.canRead())
throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not readable!");
if (!f.canWrite())
throw new IOException("DB directory [" + f.getAbsolutePath() + "] is not writable!");
return f;
}
private final static String LEASESET_PREFIX = "leaseSet-";
@ -252,72 +257,72 @@ class PersistentDataStore extends TransientDataStore {
private final static String ROUTERINFO_SUFFIX = ".dat";
private String getLeaseSetName(Hash hash) {
return LEASESET_PREFIX + hash.toBase64() + LEASESET_SUFFIX;
return LEASESET_PREFIX + hash.toBase64() + LEASESET_SUFFIX;
}
private String getRouterInfoName(Hash hash) {
return ROUTERINFO_PREFIX + hash.toBase64() + ROUTERINFO_SUFFIX;
return ROUTERINFO_PREFIX + hash.toBase64() + ROUTERINFO_SUFFIX;
}
private Hash getLeaseSetHash(String filename) {
return getHash(filename, LEASESET_PREFIX, LEASESET_SUFFIX);
return getHash(filename, LEASESET_PREFIX, LEASESET_SUFFIX);
}
private Hash getRouterInfoHash(String filename) {
return getHash(filename, ROUTERINFO_PREFIX, ROUTERINFO_SUFFIX);
return getHash(filename, ROUTERINFO_PREFIX, ROUTERINFO_SUFFIX);
}
private Hash getHash(String filename, String prefix, String suffix) {
try {
String key = filename.substring(prefix.length());
key = key.substring(0, key.length() - suffix.length());
Hash h = new Hash();
h.fromBase64(key);
return h;
} catch (Exception e) {
_log.warn("Unable to fetch the key from [" + filename + "]", e);
return null;
}
try {
String key = filename.substring(prefix.length());
key = key.substring(0, key.length() - suffix.length());
Hash h = new Hash();
h.fromBase64(key);
return h;
} catch (Exception e) {
_log.warn("Unable to fetch the key from [" + filename + "]", e);
return null;
}
}
private void removeFile(Hash key, File dir) throws IOException {
String lsName = getLeaseSetName(key);
String riName = getRouterInfoName(key);
File f = new File(dir, lsName);
if (f.exists()) {
boolean removed = f.delete();
if (!removed)
_log.warn("Unable to remove lease set at " + f.getAbsolutePath());
else
_log.info("Removed lease set at " + f.getAbsolutePath());
return;
}
f = new File(dir, riName);
if (f.exists()) {
boolean removed = f.delete();
if (!removed)
_log.warn("Unable to remove router info at " + f.getAbsolutePath());
else
_log.info("Removed router info at " + f.getAbsolutePath());
return;
}
String lsName = getLeaseSetName(key);
String riName = getRouterInfoName(key);
File f = new File(dir, lsName);
if (f.exists()) {
boolean removed = f.delete();
if (!removed)
_log.warn("Unable to remove lease set at " + f.getAbsolutePath());
else
_log.info("Removed lease set at " + f.getAbsolutePath());
return;
}
f = new File(dir, riName);
if (f.exists()) {
boolean removed = f.delete();
if (!removed)
_log.warn("Unable to remove router info at " + f.getAbsolutePath());
else
_log.info("Removed router info at " + f.getAbsolutePath());
return;
}
}
private final static class LeaseSetFilter implements FilenameFilter {
private static final FilenameFilter _instance = new LeaseSetFilter();
public static final FilenameFilter getInstance() { return _instance; }
public boolean accept(File dir, String name) {
if (name == null) return false;
name = name.toUpperCase();
return (name.startsWith(LEASESET_PREFIX.toUpperCase()) && name.endsWith(LEASESET_SUFFIX.toUpperCase()));
}
private static final FilenameFilter _instance = new LeaseSetFilter();
public static final FilenameFilter getInstance() { return _instance; }
public boolean accept(File dir, String name) {
if (name == null) return false;
name = name.toUpperCase();
return (name.startsWith(LEASESET_PREFIX.toUpperCase()) && name.endsWith(LEASESET_SUFFIX.toUpperCase()));
}
}
private final static class RouterInfoFilter implements FilenameFilter {
private static final FilenameFilter _instance = new RouterInfoFilter();
public static final FilenameFilter getInstance() { return _instance; }
public boolean accept(File dir, String name) {
if (name == null) return false;
name = name.toUpperCase();
return (name.startsWith(ROUTERINFO_PREFIX.toUpperCase()) && name.endsWith(ROUTERINFO_SUFFIX.toUpperCase()));
}
private static final FilenameFilter _instance = new RouterInfoFilter();
public static final FilenameFilter getInstance() { return _instance; }
public boolean accept(File dir, String name) {
if (name == null) return false;
name = name.toUpperCase();
return (name.startsWith(ROUTERINFO_PREFIX.toUpperCase()) && name.endsWith(ROUTERINFO_SUFFIX.toUpperCase()));
}
}
}

View File

@ -19,6 +19,8 @@ import net.i2p.router.JobQueue;
import net.i2p.router.Router;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
import net.i2p.I2PException;
/**
* Run periodically for each locally created leaseSet to cause it to be republished
@ -26,53 +28,43 @@ import net.i2p.util.Log;
*
*/
public class RepublishLeaseSetJob extends JobImpl {
private final static Log _log = new Log(RepublishLeaseSetJob.class);
private Log _log;
private final static long REPUBLISH_LEASESET_DELAY = 60*1000; // 5 mins
private Hash _dest;
private KademliaNetworkDatabaseFacade _facade;
/**
* maintain a set of dest hashes that we're already publishing,
* so we don't go overboard. This is clunky, so if it gets any more
* complicated this will go into a 'manager' function rather than part of
* a job.
*/
private final static Set _pending = new HashSet(16);
public static boolean alreadyRepublishing(Hash dest) {
synchronized (_pending) {
return _pending.contains(dest);
}
}
public RepublishLeaseSetJob(KademliaNetworkDatabaseFacade facade, Hash destHash) {
super();
_facade = facade;
_dest = destHash;
synchronized (_pending) {
_pending.add(destHash);
}
getTiming().setStartAfter(Clock.getInstance().now()+REPUBLISH_LEASESET_DELAY);
public RepublishLeaseSetJob(RouterContext ctx, KademliaNetworkDatabaseFacade facade, Hash destHash) {
super(ctx);
_log = ctx.logManager().getLog(RepublishLeaseSetJob.class);
_facade = facade;
_dest = destHash;
getTiming().setStartAfter(ctx.clock().now()+REPUBLISH_LEASESET_DELAY);
}
public String getName() { return "Republish a local leaseSet"; }
public void runJob() {
if (ClientManagerFacade.getInstance().isLocal(_dest)) {
LeaseSet ls = _facade.lookupLeaseSetLocally(_dest);
if (ls != null) {
_log.warn("Client " + _dest + " is local, so we're republishing it");
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
_log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
} else {
JobQueue.getInstance().addJob(new StoreJob(_facade, _dest, ls, null, null, REPUBLISH_LEASESET_DELAY));
}
} else {
_log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");
}
requeue(REPUBLISH_LEASESET_DELAY);
} else {
_log.info("Client " + _dest + " is no longer local, so no more republishing their leaseSet");
synchronized (_pending) {
_pending.remove(_dest);
}
}
try {
if (_context.clientManager().isLocal(_dest)) {
LeaseSet ls = _facade.lookupLeaseSetLocally(_dest);
if (ls != null) {
_log.warn("Client " + _dest + " is local, so we're republishing it");
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
_log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
} else {
_context.jobQueue().addJob(new StoreJob(_context, _facade, _dest, ls, null, null, REPUBLISH_LEASESET_DELAY));
}
} else {
_log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");
}
requeue(REPUBLISH_LEASESET_DELAY);
return;
} else {
_log.info("Client " + _dest + " is no longer local, so no more republishing their leaseSet");
}
_facade.stopPublishing(_dest);
} catch (RuntimeException re) {
_log.error("Uncaught error republishing the leaseSet", re);
_facade.stopPublishing(_dest);
throw re;
}
}
}

View File

@ -34,6 +34,7 @@ import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.stat.StatManager;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Search for a particular key iteratively until we either find a value or we
@ -41,7 +42,7 @@ import net.i2p.util.Log;
*
*/
class SearchJob extends JobImpl {
private final Log _log = new Log(SearchJob.class);
private Log _log;
private KademliaNetworkDatabaseFacade _facade;
private SearchState _state;
private Job _onSuccess;
@ -51,34 +52,35 @@ class SearchJob extends JobImpl {
private boolean _keepStats;
private boolean _isLease;
private Job _pendingRequeueJob;
private PeerSelector _peerSelector;
public final static int SEARCH_BREDTH = 3; // 3 peers at a time
public final static int SEARCH_PRIORITY = 400; // large because the search is probably for a real search
private static final long PER_PEER_TIMEOUT = 30*1000;
static {
StatManager.getInstance().createRateStat("netDb.successTime", "How long a successful search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("netDb.failedTime", "How long a failed search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
StatManager.getInstance().createRateStat("netDb.failedPeers", "How many peers are contacted in a failed search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
}
/**
* Create a new search for the routingKey specified
*
*/
public SearchJob(KademliaNetworkDatabaseFacade facade, Hash key, Job onSuccess, Job onFailure, long timeoutMs, boolean keepStats, boolean isLease) {
public SearchJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key, Job onSuccess, Job onFailure, long timeoutMs, boolean keepStats, boolean isLease) {
super(context);
if ( (key == null) || (key.getData() == null) ) throw new IllegalArgumentException("Search for null key? wtf");
_log = _context.logManager().getLog(SearchJob.class);
_facade = facade;
_state = new SearchState(key);
_state = new SearchState(_context, key);
_onSuccess = onSuccess;
_onFailure = onFailure;
_timeoutMs = timeoutMs;
_keepStats = keepStats;
_isLease = isLease;
_expiration = Clock.getInstance().now() + timeoutMs;
}
_peerSelector = new PeerSelector(_context);
_expiration = _context.clock().now() + timeoutMs;
_context.statManager().createRateStat("netDb.successTime", "How long a successful search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedPeers", "How many peers are contacted in a failed search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
}
public void runJob() {
if (_log.shouldLog(Log.INFO))
@ -125,7 +127,7 @@ class SearchJob extends JobImpl {
private boolean isLocal() { return _facade.getDataStore().isKnown(_state.getTarget()); }
private boolean isExpired() {
return Clock.getInstance().now() >= _expiration;
return _context.clock().now() >= _expiration;
}
/**
@ -187,16 +189,21 @@ class SearchJob extends JobImpl {
private void requeuePending() {
if (_pendingRequeueJob == null)
_pendingRequeueJob = new JobImpl() {
public String getName() { return "Requeue search with pending"; }
public void runJob() { searchNext(); }
};
long now = Clock.getInstance().now();
if (_pendingRequeueJob.getTiming().getStartAfter() < now)
_pendingRequeueJob.getTiming().setStartAfter(now+5*1000);
JobQueue.getInstance().addJob(_pendingRequeueJob);
_pendingRequeueJob = new RequeuePending();
long now = _context.clock().now();
if (_pendingRequeueJob.getTiming().getStartAfter() < now)
_pendingRequeueJob.getTiming().setStartAfter(now+5*1000);
_context.jobQueue().addJob(_pendingRequeueJob);
}
private class RequeuePending extends JobImpl {
public RequeuePending() {
super(SearchJob.this._context);
}
public String getName() { return "Requeue search with pending"; }
public void runJob() { searchNext(); }
}
/**
* Set of Hash structures for routers we want to check next. This is the 'interesting' part of
* the algorithm. But to keep you on your toes, we've refactored it to the PeerSelector.selectNearestExplicit
@ -204,10 +211,10 @@ class SearchJob extends JobImpl {
* @return ordered list of Hash objects
*/
private List getClosestRouters(Hash key, int numClosest, Set alreadyChecked) {
Hash rkey = RoutingKeyGenerator.getInstance().getRoutingKey(key);
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Current routing key for " + key + ": " + rkey);
return PeerSelector.getInstance().selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets());
return _peerSelector.selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets());
}
/**
@ -215,7 +222,7 @@ class SearchJob extends JobImpl {
*
*/
protected void sendSearch(RouterInfo router) {
if (router.getIdentity().equals(Router.getInstance().getRouterInfo().getIdentity())) {
if (router.getIdentity().equals(_context.router().getRouterInfo().getIdentity())) {
// don't search ourselves
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": Dont send search to ourselves - why did we try?");
@ -241,26 +248,26 @@ class SearchJob extends JobImpl {
TunnelId inTunnelId = getInboundTunnelId();
if (inTunnelId == null) {
_log.error("No tunnels to get search replies through! wtf!");
JobQueue.getInstance().addJob(new FailedJob(router));
_context.jobQueue().addJob(new FailedJob(router));
return;
}
TunnelInfo inTunnel = TunnelManagerFacade.getInstance().getTunnelInfo(inTunnelId);
RouterInfo inGateway = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(inTunnel.getThisHop());
TunnelInfo inTunnel = _context.tunnelManager().getTunnelInfo(inTunnelId);
RouterInfo inGateway = _context.netDb().lookupRouterInfoLocally(inTunnel.getThisHop());
if (inGateway == null) {
_log.error("We can't find the gateway to our inbound tunnel?! wtf");
JobQueue.getInstance().addJob(new FailedJob(router));
_context.jobQueue().addJob(new FailedJob(router));
return;
}
long expiration = Clock.getInstance().now() + PER_PEER_TIMEOUT; // getTimeoutMs();
long expiration = _context.clock().now() + PER_PEER_TIMEOUT; // getTimeoutMs();
DatabaseLookupMessage msg = buildMessage(inTunnelId, inGateway, expiration);
TunnelId outTunnelId = getOutboundTunnelId();
if (outTunnelId == null) {
_log.error("No tunnels to send search out through! wtf!");
JobQueue.getInstance().addJob(new FailedJob(router));
_context.jobQueue().addJob(new FailedJob(router));
return;
}
@ -270,18 +277,18 @@ class SearchJob extends JobImpl {
+ msg.getFrom().getIdentity().getHash().toBase64() + "] via tunnel ["
+ msg.getReplyTunnel() + "]");
SearchMessageSelector sel = new SearchMessageSelector(router, _expiration, _state);
SearchMessageSelector sel = new SearchMessageSelector(_context, router, _expiration, _state);
long timeoutMs = PER_PEER_TIMEOUT; // getTimeoutMs();
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(router, _state, _facade, this);
SendTunnelMessageJob j = new SendTunnelMessageJob(msg, outTunnelId, router.getIdentity().getHash(),
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(_context, router, _state, _facade, this);
SendTunnelMessageJob j = new SendTunnelMessageJob(_context, msg, outTunnelId, router.getIdentity().getHash(),
null, null, reply, new FailedJob(router), sel,
timeoutMs, SEARCH_PRIORITY);
JobQueue.getInstance().addJob(j);
_context.jobQueue().addJob(j);
}
/** we're searching for a router, so we can just send direct */
protected void sendRouterSearch(RouterInfo router) {
long expiration = Clock.getInstance().now() + PER_PEER_TIMEOUT; // getTimeoutMs();
long expiration = _context.clock().now() + PER_PEER_TIMEOUT; // getTimeoutMs();
DatabaseLookupMessage msg = buildMessage(expiration);
@ -289,12 +296,12 @@ class SearchJob extends JobImpl {
_log.info(getJobId() + ": Sending router search to " + router.getIdentity().getHash().toBase64()
+ " for " + msg.getSearchKey().toBase64() + " w/ replies to us ["
+ msg.getFrom().getIdentity().getHash().toBase64() + "]");
SearchMessageSelector sel = new SearchMessageSelector(router, _expiration, _state);
SearchMessageSelector sel = new SearchMessageSelector(_context, router, _expiration, _state);
long timeoutMs = PER_PEER_TIMEOUT;
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(router, _state, _facade, this);
SendMessageDirectJob j = new SendMessageDirectJob(msg, router.getIdentity().getHash(),
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(_context, router, _state, _facade, this);
SendMessageDirectJob j = new SendMessageDirectJob(_context, msg, router.getIdentity().getHash(),
reply, new FailedJob(router), sel, expiration, SEARCH_PRIORITY);
JobQueue.getInstance().addJob(j);
_context.jobQueue().addJob(j);
}
/**
@ -306,7 +313,7 @@ class SearchJob extends JobImpl {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = TunnelManagerFacade.getInstance().selectOutboundTunnelIds(crit);
List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(crit);
if (tunnelIds.size() <= 0) {
return null;
}
@ -323,7 +330,7 @@ class SearchJob extends JobImpl {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = TunnelManagerFacade.getInstance().selectInboundTunnelIds(crit);
List tunnelIds = _context.tunnelManager().selectInboundTunnelIds(crit);
if (tunnelIds.size() <= 0) {
return null;
}
@ -338,7 +345,7 @@ class SearchJob extends JobImpl {
* @param expiration when the search should stop
*/
protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, RouterInfo replyGateway, long expiration) {
DatabaseLookupMessage msg = new DatabaseLookupMessage();
DatabaseLookupMessage msg = new DatabaseLookupMessage(_context);
msg.setSearchKey(_state.getTarget());
msg.setFrom(replyGateway);
msg.setDontIncludePeers(_state.getAttempted());
@ -353,9 +360,9 @@ class SearchJob extends JobImpl {
*
*/
protected DatabaseLookupMessage buildMessage(long expiration) {
DatabaseLookupMessage msg = new DatabaseLookupMessage();
DatabaseLookupMessage msg = new DatabaseLookupMessage(_context);
msg.setSearchKey(_state.getTarget());
msg.setFrom(Router.getInstance().getRouterInfo());
msg.setFrom(_context.router().getRouterInfo());
msg.setDontIncludePeers(_state.getAttempted());
msg.setMessageExpiration(new Date(expiration));
msg.setReplyTunnel(null);
@ -365,7 +372,7 @@ class SearchJob extends JobImpl {
void replyFound(DatabaseSearchReplyMessage message, Hash peer) {
long duration = _state.replyFound(peer);
// this processing can take a while, so split 'er up
JobQueue.getInstance().addJob(new SearchReplyJob((DatabaseSearchReplyMessage)message, peer, duration));
_context.jobQueue().addJob(new SearchReplyJob((DatabaseSearchReplyMessage)message, peer, duration));
}
private final class SearchReplyJob extends JobImpl {
@ -378,6 +385,7 @@ class SearchJob extends JobImpl {
private int _duplicatePeers;
private long _duration;
public SearchReplyJob(DatabaseSearchReplyMessage message, Hash peer, long duration) {
super(SearchJob.this._context);
_msg = message;
_peer = peer;
_curIndex = 0;
@ -389,8 +397,8 @@ class SearchJob extends JobImpl {
public String getName() { return "Process Reply for Kademlia Search"; }
public void runJob() {
if (_curIndex >= _msg.getNumReplies()) {
ProfileManager.getInstance().dbLookupReply(_peer, _newPeers, _seenPeers,
_invalidPeers, _duplicatePeers, _duration);
_context.profileManager().dbLookupReply(_peer, _newPeers, _seenPeers,
_invalidPeers, _duplicatePeers, _duration);
} else {
RouterInfo ri = _msg.getReply(_curIndex);
if (ri.isValid()) {
@ -435,7 +443,7 @@ class SearchJob extends JobImpl {
*
*/
public FailedJob(RouterInfo peer, boolean penalizePeer) {
super();
super(SearchJob.this._context);
_penalizePeer = penalizePeer;
_peer = peer.getIdentity().getHash();
}
@ -444,7 +452,7 @@ class SearchJob extends JobImpl {
if (_penalizePeer) {
if (_log.shouldLog(Log.WARN))
_log.warn("Penalizing peer for timeout on search: " + _peer.toBase64());
ProfileManager.getInstance().dbLookupFailed(_peer);
_context.profileManager().dbLookupFailed(_peer);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("NOT (!!) Penalizing peer for timeout on search: " + _peer.toBase64());
@ -464,12 +472,12 @@ class SearchJob extends JobImpl {
_log.debug(getJobId() + ": State of successful search: " + _state);
if (_keepStats) {
long time = Clock.getInstance().now() - _state.getWhenStarted();
StatManager.getInstance().addRateData("netDb.successTime", time, 0);
StatManager.getInstance().addRateData("netDb.successPeers", _state.getAttempted().size(), time);
long time = _context.clock().now() - _state.getWhenStarted();
_context.statManager().addRateData("netDb.successTime", time, 0);
_context.statManager().addRateData("netDb.successPeers", _state.getAttempted().size(), time);
}
if (_onSuccess != null)
JobQueue.getInstance().addJob(_onSuccess);
_context.jobQueue().addJob(_onSuccess);
}
/**
@ -482,12 +490,12 @@ class SearchJob extends JobImpl {
_log.debug(getJobId() + ": State of failed search: " + _state);
if (_keepStats) {
long time = Clock.getInstance().now() - _state.getWhenStarted();
StatManager.getInstance().addRateData("netDb.failedTime", time, 0);
StatManager.getInstance().addRateData("netDb.failedPeers", _state.getAttempted().size(), time);
long time = _context.clock().now() - _state.getWhenStarted();
_context.statManager().addRateData("netDb.failedTime", time, 0);
_context.statManager().addRateData("netDb.failedPeers", _state.getAttempted().size(), time);
}
if (_onFailure != null)
JobQueue.getInstance().addJob(_onFailure);
_context.jobQueue().addJob(_onFailure);
}
public String getName() { return "Kademlia NetDb Search"; }

View File

@ -6,81 +6,84 @@ import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.router.MessageSelector;
import net.i2p.util.Clock;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
/**
* Check to see the message is a reply from the peer regarding the current
* Check to see the message is a reply from the peer regarding the current
* search
*
*/
class SearchMessageSelector implements MessageSelector {
private final static Log _log = new Log(SearchMessageSelector.class);
private Log _log;
private RouterContext _context;
private static int __searchSelectorId = 0;
private Hash _peer;
private boolean _found;
private int _id;
private long _exp;
private SearchState _state;
public SearchMessageSelector(RouterInfo peer, long expiration, SearchState state) {
_peer = peer.getIdentity().getHash();
_found = false;
_exp = expiration;
_state = state;
_id = ++__searchSelectorId;
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] Created: " + toString());
public SearchMessageSelector(RouterContext context, RouterInfo peer, long expiration, SearchState state) {
_context = context;
_log = context.logManager().getLog(SearchMessageSelector.class);
_peer = peer.getIdentity().getHash();
_found = false;
_exp = expiration;
_state = state;
_id = ++__searchSelectorId;
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] Created: " + toString());
}
public String toString() { return "Search selector [" + _id + "] looking for a reply from " + _peer + " with regards to " + _state.getTarget(); }
public boolean continueMatching() {
if (_found) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] Dont continue matching! looking for a reply from " + _peer + " with regards to " + _state.getTarget());
return false;
}
long now = Clock.getInstance().now();
return now < _exp;
public boolean continueMatching() {
if (_found) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] Dont continue matching! looking for a reply from " + _peer + " with regards to " + _state.getTarget());
return false;
}
long now = _context.clock().now();
return now < _exp;
}
public long getExpiration() { return _exp; }
public boolean isMatch(I2NPMessage message) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] isMatch("+message.getClass().getName() + ") [want dbStore or dbSearchReply from " + _peer + " for " + _state.getTarget() + "]");
if (message instanceof DatabaseStoreMessage) {
DatabaseStoreMessage msg = (DatabaseStoreMessage)message;
if (msg.getKey().equals(_state.getTarget())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] Was a DBStore of the key we're looking for. May not have been from who we're checking against though, but DBStore doesn't include that info");
_found = true;
return true;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] DBStore of a key we're not looking for");
return false;
}
} else if (message instanceof DatabaseSearchReplyMessage) {
DatabaseSearchReplyMessage msg = (DatabaseSearchReplyMessage)message;
if (_peer.equals(msg.getFromHash())) {
if (msg.getSearchKey().equals(_state.getTarget())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] Was a DBSearchReply from who we're checking with for a key we're looking for");
_found = true;
return true;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] Was a DBSearchReply from who we're checking with but NOT for the key we're looking for");
return false;
}
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] DBSearchReply from someone we are not checking with [" + msg.getFromHash() + ", not " + _state.getTarget() + "]");
return false;
}
} else {
//_log.debug("Not a DbStore or DbSearchReply");
return false;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] isMatch("+message.getClass().getName() + ") [want dbStore or dbSearchReply from " + _peer + " for " + _state.getTarget() + "]");
if (message instanceof DatabaseStoreMessage) {
DatabaseStoreMessage msg = (DatabaseStoreMessage)message;
if (msg.getKey().equals(_state.getTarget())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] Was a DBStore of the key we're looking for. May not have been from who we're checking against though, but DBStore doesn't include that info");
_found = true;
return true;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] DBStore of a key we're not looking for");
return false;
}
} else if (message instanceof DatabaseSearchReplyMessage) {
DatabaseSearchReplyMessage msg = (DatabaseSearchReplyMessage)message;
if (_peer.equals(msg.getFromHash())) {
if (msg.getSearchKey().equals(_state.getTarget())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] Was a DBSearchReply from who we're checking with for a key we're looking for");
_found = true;
return true;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] Was a DBSearchReply from who we're checking with but NOT for the key we're looking for");
return false;
}
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("[" + _id + "] DBSearchReply from someone we are not checking with [" + msg.getFromHash() + ", not " + _state.getTarget() + "]");
return false;
}
} else {
//_log.debug("Not a DbStore or DbSearchReply");
return false;
}
}
}

View File

@ -9,12 +9,14 @@ import java.util.Set;
import net.i2p.data.Hash;
import net.i2p.util.Clock;
import net.i2p.router.RouterContext;
/**
* Data related to a particular search
*
*/
class SearchState {
private RouterContext _context;
private HashSet _pendingPeers;
private HashMap _pendingPeerTimes;
private HashSet _attemptedPeers;
@ -23,137 +25,138 @@ class SearchState {
private Hash _searchKey;
private volatile long _completed;
private volatile long _started;
public SearchState(Hash key) {
_searchKey = key;
_pendingPeers = new HashSet(16);
_attemptedPeers = new HashSet(16);
_failedPeers = new HashSet(16);
_successfulPeers = new HashSet(16);
_pendingPeerTimes = new HashMap(16);
_completed = -1;
_started = Clock.getInstance().now();
public SearchState(RouterContext context, Hash key) {
_context = context;
_searchKey = key;
_pendingPeers = new HashSet(16);
_attemptedPeers = new HashSet(16);
_failedPeers = new HashSet(16);
_successfulPeers = new HashSet(16);
_pendingPeerTimes = new HashMap(16);
_completed = -1;
_started = _context.clock().now();
}
public Hash getTarget() { return _searchKey; }
public Set getPending() {
synchronized (_pendingPeers) {
return (Set)_pendingPeers.clone();
}
public Set getPending() {
synchronized (_pendingPeers) {
return (Set)_pendingPeers.clone();
}
}
public Set getAttempted() {
synchronized (_attemptedPeers) {
return (Set)_attemptedPeers.clone();
}
public Set getAttempted() {
synchronized (_attemptedPeers) {
return (Set)_attemptedPeers.clone();
}
}
public boolean wasAttempted(Hash peer) {
synchronized (_attemptedPeers) {
return _attemptedPeers.contains(peer);
}
synchronized (_attemptedPeers) {
return _attemptedPeers.contains(peer);
}
}
public Set getSuccessful() {
synchronized (_successfulPeers) {
return (Set)_successfulPeers.clone();
}
public Set getSuccessful() {
synchronized (_successfulPeers) {
return (Set)_successfulPeers.clone();
}
}
public Set getFailed() {
synchronized (_failedPeers) {
return (Set)_failedPeers.clone();
}
public Set getFailed() {
synchronized (_failedPeers) {
return (Set)_failedPeers.clone();
}
}
public boolean completed() { return _completed != -1; }
public void complete(boolean completed) {
if (completed)
_completed = Clock.getInstance().now();
public void complete(boolean completed) {
if (completed)
_completed = _context.clock().now();
}
public long getWhenStarted() { return _started; }
public long getWhenCompleted() { return _completed; }
public void addPending(Collection pending) {
synchronized (_pendingPeers) {
_pendingPeers.addAll(pending);
for (Iterator iter = pending.iterator(); iter.hasNext(); )
_pendingPeerTimes.put(iter.next(), new Long(Clock.getInstance().now()));
}
synchronized (_attemptedPeers) {
_attemptedPeers.addAll(pending);
}
synchronized (_pendingPeers) {
_pendingPeers.addAll(pending);
for (Iterator iter = pending.iterator(); iter.hasNext(); )
_pendingPeerTimes.put(iter.next(), new Long(_context.clock().now()));
}
synchronized (_attemptedPeers) {
_attemptedPeers.addAll(pending);
}
}
/** how long did it take to get the reply, or -1 if we don't know */
public long dataFound(Hash peer) {
long rv = -1;
synchronized (_pendingPeers) {
_pendingPeers.remove(peer);
Long when = (Long)_pendingPeerTimes.remove(peer);
if (when != null)
rv = Clock.getInstance().now() - when.longValue();
}
synchronized (_successfulPeers) {
_successfulPeers.add(peer);
}
return rv;
long rv = -1;
synchronized (_pendingPeers) {
_pendingPeers.remove(peer);
Long when = (Long)_pendingPeerTimes.remove(peer);
if (when != null)
rv = _context.clock().now() - when.longValue();
}
synchronized (_successfulPeers) {
_successfulPeers.add(peer);
}
return rv;
}
/** how long did it take to get the reply, or -1 if we dont know */
public long replyFound(Hash peer) {
synchronized (_pendingPeers) {
_pendingPeers.remove(peer);
Long when = (Long)_pendingPeerTimes.remove(peer);
if (when != null)
return Clock.getInstance().now() - when.longValue();
else
return -1;
}
synchronized (_pendingPeers) {
_pendingPeers.remove(peer);
Long when = (Long)_pendingPeerTimes.remove(peer);
if (when != null)
return _context.clock().now() - when.longValue();
else
return -1;
}
}
public void replyTimeout(Hash peer) {
synchronized (_pendingPeers) {
_pendingPeers.remove(peer);
_pendingPeerTimes.remove(peer);
}
synchronized (_failedPeers) {
_failedPeers.add(peer);
}
synchronized (_pendingPeers) {
_pendingPeers.remove(peer);
_pendingPeerTimes.remove(peer);
}
synchronized (_failedPeers) {
_failedPeers.add(peer);
}
}
public String toString() {
StringBuffer buf = new StringBuffer(256);
buf.append("Searching for ").append(_searchKey);
buf.append(" ");
if (_completed <= 0)
buf.append(" completed? false ");
else
buf.append(" completed on ").append(new Date(_completed));
buf.append(" Attempted: ");
synchronized (_attemptedPeers) {
for (Iterator iter = _attemptedPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
buf.append(peer.toBase64()).append(" ");
}
}
buf.append(" Pending: ");
synchronized (_pendingPeers) {
for (Iterator iter = _pendingPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
buf.append(peer.toBase64()).append(" ");
}
}
buf.append(" Failed: ");
synchronized (_failedPeers) {
for (Iterator iter = _failedPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
buf.append(peer.toBase64()).append(" ");
}
}
buf.append(" Successful: ");
synchronized (_successfulPeers) {
for (Iterator iter = _successfulPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
buf.append(peer.toBase64()).append(" ");
}
}
return buf.toString();
public String toString() {
StringBuffer buf = new StringBuffer(256);
buf.append("Searching for ").append(_searchKey);
buf.append(" ");
if (_completed <= 0)
buf.append(" completed? false ");
else
buf.append(" completed on ").append(new Date(_completed));
buf.append(" Attempted: ");
synchronized (_attemptedPeers) {
for (Iterator iter = _attemptedPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
buf.append(peer.toBase64()).append(" ");
}
}
buf.append(" Pending: ");
synchronized (_pendingPeers) {
for (Iterator iter = _pendingPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
buf.append(peer.toBase64()).append(" ");
}
}
buf.append(" Failed: ");
synchronized (_failedPeers) {
for (Iterator iter = _failedPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
buf.append(peer.toBase64()).append(" ");
}
}
buf.append(" Successful: ");
synchronized (_successfulPeers) {
for (Iterator iter = _successfulPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
buf.append(peer.toBase64()).append(" ");
}
}
return buf.toString();
}
}

View File

@ -11,58 +11,60 @@ import net.i2p.router.JobImpl;
import net.i2p.router.ProfileManager;
import net.i2p.router.ReplyJob;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Called after a match to a db search is found
*
*/
class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
private final static Log _log = new Log(SearchUpdateReplyFoundJob.class);
private Log _log;
private I2NPMessage _message;
private Hash _peer;
private SearchState _state;
private KademliaNetworkDatabaseFacade _facade;
private SearchJob _job;
public SearchUpdateReplyFoundJob(RouterInfo peer, SearchState state, KademliaNetworkDatabaseFacade facade, SearchJob job) {
super();
_peer = peer.getIdentity().getHash();
_state = state;
_facade = facade;
_job = job;
public SearchUpdateReplyFoundJob(RouterContext context, RouterInfo peer, SearchState state, KademliaNetworkDatabaseFacade facade, SearchJob job) {
super(context);
_log = context.logManager().getLog(SearchUpdateReplyFoundJob.class);
_peer = peer.getIdentity().getHash();
_state = state;
_facade = facade;
_job = job;
}
public String getName() { return "Update Reply Found for Kademlia Search"; }
public void runJob() {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Reply from " + _peer + " with message " + _message.getClass().getName());
if (_message instanceof DatabaseStoreMessage) {
long timeToReply = _state.dataFound(_peer);
DatabaseStoreMessage msg = (DatabaseStoreMessage)_message;
if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
_facade.store(msg.getKey(), msg.getLeaseSet());
} else if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": dbStore received on search containing router " + msg.getKey() + " with publishDate of " + new Date(msg.getRouterInfo().getPublished()));
_facade.store(msg.getKey(), msg.getRouterInfo());
} else {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": Unknown db store type?!@ " + msg.getValueType());
}
ProfileManager.getInstance().dbLookupSuccessful(_peer, timeToReply);
} else if (_message instanceof DatabaseSearchReplyMessage) {
_job.replyFound((DatabaseSearchReplyMessage)_message, _peer);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": WTF, reply job matched a strange message: " + _message);
return;
}
_job.searchNext();
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Reply from " + _peer + " with message " + _message.getClass().getName());
if (_message instanceof DatabaseStoreMessage) {
long timeToReply = _state.dataFound(_peer);
DatabaseStoreMessage msg = (DatabaseStoreMessage)_message;
if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
_facade.store(msg.getKey(), msg.getLeaseSet());
} else if (msg.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": dbStore received on search containing router " + msg.getKey() + " with publishDate of " + new Date(msg.getRouterInfo().getPublished()));
_facade.store(msg.getKey(), msg.getRouterInfo());
} else {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": Unknown db store type?!@ " + msg.getValueType());
}
_context.profileManager().dbLookupSuccessful(_peer, timeToReply);
} else if (_message instanceof DatabaseSearchReplyMessage) {
_job.replyFound((DatabaseSearchReplyMessage)_message, _peer);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": WTF, reply job matched a strange message: " + _message);
return;
}
_job.searchNext();
}
public void setMessage(I2NPMessage message) { _message = message; }
public void setMessage(I2NPMessage message) { _message = message; }
}

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -16,50 +16,52 @@ import net.i2p.data.Hash;
import net.i2p.router.JobImpl;
import net.i2p.router.JobQueue;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
/**
* Fire off search jobs for random keys from the explore pool, up to MAX_PER_RUN
* at a time.
* at a time.
*
*/
class StartExplorersJob extends JobImpl {
private final static Log _log = new Log(StartExplorersJob.class);
private Log _log;
private KademliaNetworkDatabaseFacade _facade;
private final static long RERUN_DELAY_MS = 3*60*1000; // every 3 minutes, explore MAX_PER_RUN keys
private final static int MAX_PER_RUN = 3; // don't explore more than 1 bucket at a time
public StartExplorersJob(KademliaNetworkDatabaseFacade facade) {
super();
_facade = facade;
public StartExplorersJob(RouterContext context, KademliaNetworkDatabaseFacade facade) {
super(context);
_log = context.logManager().getLog(StartExplorersJob.class);
_facade = facade;
}
public String getName() { return "Start Explorers Job"; }
public void runJob() {
Set toExplore = selectKeysToExplore();
_log.debug("Keys to explore during this run: " + toExplore);
_facade.removeFromExploreKeys(toExplore);
for (Iterator iter = toExplore.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
//_log.info("Starting explorer for " + key, new Exception("Exploring!"));
JobQueue.getInstance().addJob(new ExploreJob(_facade, key));
}
requeue(RERUN_DELAY_MS);
public void runJob() {
Set toExplore = selectKeysToExplore();
_log.debug("Keys to explore during this run: " + toExplore);
_facade.removeFromExploreKeys(toExplore);
for (Iterator iter = toExplore.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
//_log.info("Starting explorer for " + key, new Exception("Exploring!"));
_context.jobQueue().addJob(new ExploreJob(_context, _facade, key));
}
requeue(RERUN_DELAY_MS);
}
/**
* Run through the explore pool and pick out some values
*
*/
private Set selectKeysToExplore() {
Set queued = _facade.getExploreKeys();
if (queued.size() <= MAX_PER_RUN)
return queued;
Set rv = new HashSet(MAX_PER_RUN);
for (Iterator iter = queued.iterator(); iter.hasNext(); ) {
if (rv.size() >= MAX_PER_RUN) break;
rv.add(iter.next());
}
return rv;
Set queued = _facade.getExploreKeys();
if (queued.size() <= MAX_PER_RUN)
return queued;
Set rv = new HashSet(MAX_PER_RUN);
for (Iterator iter = queued.iterator(); iter.hasNext(); ) {
if (rv.size() >= MAX_PER_RUN) break;
rv.add(iter.next());
}
return rv;
}
}

View File

@ -49,15 +49,17 @@ import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.stat.StatManager;
import net.i2p.router.RouterContext;
class StoreJob extends JobImpl {
private final Log _log = new Log(StoreJob.class);
private Log _log;
private KademliaNetworkDatabaseFacade _facade;
private StoreState _state;
private Job _onSuccess;
private Job _onFailure;
private long _timeoutMs;
private long _expiration;
private PeerSelector _peerSelector;
private final static int PARALLELIZATION = 1; // how many sent at a time
private final static int REDUNDANCY = 2; // we want the data sent to 2 peers
@ -72,22 +74,23 @@ class StoreJob extends JobImpl {
*/
private final static int EXPLORATORY_REDUNDANCY = 1;
private final static int STORE_PRIORITY = 100;
static {
StatManager.getInstance().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
}
/**
* Create a new search for the routingKey specified
*
*/
public StoreJob(KademliaNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) {
public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key,
DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) {
super(context);
_log = context.logManager().getLog(StoreJob.class);
_context.statManager().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_facade = facade;
_state = new StoreState(key, data);
_onSuccess = onSuccess;
_onFailure = onFailure;
_timeoutMs = timeoutMs;
_expiration = Clock.getInstance().now() + timeoutMs;
_expiration = context.clock().now() + timeoutMs;
_peerSelector = new PeerSelector(context);
}
public String getName() { return "Kademlia NetDb Store";}
@ -96,7 +99,7 @@ class StoreJob extends JobImpl {
}
protected boolean isExpired() {
return Clock.getInstance().now() >= _expiration;
return _context.clock().now() >= _expiration;
}
/**
@ -169,10 +172,11 @@ class StoreJob extends JobImpl {
* @return ordered list of Hash objects
*/
protected List getClosestRouters(Hash key, int numClosest, Set alreadyChecked) {
Hash rkey = RoutingKeyGenerator.getInstance().getRoutingKey(key);
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Current routing key for " + key + ": " + rkey);
return PeerSelector.getInstance().selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets());
return _peerSelector.selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets());
}
/**
@ -181,7 +185,7 @@ class StoreJob extends JobImpl {
*
*/
protected void sendStore(RouterInfo router) {
DatabaseStoreMessage msg = new DatabaseStoreMessage();
DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
msg.setKey(_state.getTarget());
if (_state.getData() instanceof RouterInfo)
msg.setRouterInfo((RouterInfo)_state.getData());
@ -189,9 +193,9 @@ class StoreJob extends JobImpl {
msg.setLeaseSet((LeaseSet)_state.getData());
else
throw new IllegalArgumentException("Storing an unknown data type! " + _state.getData());
msg.setMessageExpiration(new Date(Clock.getInstance().now() + _timeoutMs));
msg.setMessageExpiration(new Date(_context.clock().now() + _timeoutMs));
if (router.getIdentity().equals(Router.getInstance().getRouterInfo().getIdentity())) {
if (router.getIdentity().equals(_context.router().getRouterInfo().getIdentity())) {
// don't send it to ourselves
if (_log.shouldLog(Log.ERROR))
_log.error("Dont send store to ourselves - why did we try?");
@ -214,7 +218,7 @@ class StoreJob extends JobImpl {
TunnelInfo info = null;
TunnelId outboundTunnelId = selectOutboundTunnel();
if (outboundTunnelId != null)
info = TunnelManagerFacade.getInstance().getTunnelInfo(outboundTunnelId);
info = _context.tunnelManager().getTunnelInfo(outboundTunnelId);
if (info == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("selectOutboundTunnel didn't find a valid tunnel! outboundTunnelId = "
@ -226,11 +230,11 @@ class StoreJob extends JobImpl {
+ " is going to " + peer.getIdentity().getHash() + " via outbound tunnel: " + info);
// send it out our outboundTunnelId with instructions for our endpoint to forward it
// to the router specified (though no particular tunnelId on the target)
Job j = new SendTunnelMessageJob(msg, outboundTunnelId, peer.getIdentity().getHash(),
null, sent, null, fail, null, _expiration-Clock.getInstance().now(),
Job j = new SendTunnelMessageJob(_context, msg, outboundTunnelId, peer.getIdentity().getHash(),
null, sent, null, fail, null, _expiration-_context.clock().now(),
STORE_PRIORITY);
JobQueue.getInstance().addJob(j);
StatManager.getInstance().addRateData("netDb.storeSent", 1, 0);
_context.jobQueue().addJob(j);
_context.statManager().addRateData("netDb.storeSent", 1, 0);
}
private TunnelId selectOutboundTunnel() {
@ -240,7 +244,7 @@ class StoreJob extends JobImpl {
criteria.setReliabilityPriority(20);
criteria.setMaximumTunnelsRequired(1);
criteria.setMinimumTunnelsRequired(1);
List tunnelIds = TunnelManagerFacade.getInstance().selectOutboundTunnelIds(criteria);
List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(criteria);
if (tunnelIds.size() <= 0) {
_log.error("No outbound tunnels?!");
return null;
@ -263,7 +267,7 @@ class StoreJob extends JobImpl {
private Hash _peer;
public OptimisticSendSuccess(RouterInfo peer) {
super();
super(StoreJob.this._context);
_peer = peer.getIdentity().getHash();
}
@ -291,12 +295,12 @@ class StoreJob extends JobImpl {
protected class FailedJob extends JobImpl {
private Hash _peer;
public FailedJob(RouterInfo peer) {
super();
super(StoreJob.this._context);
_peer = peer.getIdentity().getHash();
}
public void runJob() {
_state.replyTimeout(_peer);
ProfileManager.getInstance().dbStoreFailed(_peer);
_context.profileManager().dbStoreFailed(_peer);
sendNext();
}
public String getName() { return "Kademlia Store Failed"; }
@ -352,7 +356,7 @@ class StoreJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("State of successful send: " + _state);
if (_onSuccess != null)
JobQueue.getInstance().addJob(_onSuccess);
_context.jobQueue().addJob(_onSuccess);
_facade.noteKeySent(_state.getTarget());
}
@ -365,10 +369,10 @@ class StoreJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("State of failed send: " + _state, new Exception("Who failed me?"));
if (_onFailure != null)
JobQueue.getInstance().addJob(_onFailure);
_context.jobQueue().addJob(_onFailure);
}
protected static class StoreState {
protected class StoreState {
private Hash _key;
private DataStructure _data;
private HashSet _pendingPeers;
@ -390,7 +394,7 @@ class StoreJob extends JobImpl {
_successfulPeers = new HashSet(16);
_successfulExploratoryPeers = new HashSet(16);
_completed = -1;
_started = Clock.getInstance().now();
_started = _context.clock().now();
}
public Hash getTarget() { return _key; }
@ -423,7 +427,7 @@ class StoreJob extends JobImpl {
public boolean completed() { return _completed != -1; }
public void complete(boolean completed) {
if (completed)
_completed = Clock.getInstance().now();
_completed = _context.clock().now();
}
public long getWhenStarted() { return _started; }
@ -433,7 +437,7 @@ class StoreJob extends JobImpl {
synchronized (_pendingPeers) {
_pendingPeers.addAll(pending);
for (Iterator iter = pending.iterator(); iter.hasNext(); )
_pendingPeerTimes.put(iter.next(), new Long(Clock.getInstance().now()));
_pendingPeerTimes.put(iter.next(), new Long(_context.clock().now()));
}
synchronized (_attemptedPeers) {
_attemptedPeers.addAll(pending);
@ -446,7 +450,7 @@ class StoreJob extends JobImpl {
_pendingPeers.remove(peer);
Long when = (Long)_pendingPeerTimes.remove(peer);
if (when != null)
rv = Clock.getInstance().now() - when.longValue();
rv = _context.clock().now() - when.longValue();
}
synchronized (_successfulPeers) {
_successfulPeers.add(peer);
@ -460,7 +464,7 @@ class StoreJob extends JobImpl {
_pendingPeers.remove(peer);
Long when = (Long)_pendingPeerTimes.remove(peer);
if (when != null)
rv = Clock.getInstance().now() - when.longValue();
rv = _context.clock().now() - when.longValue();
}
synchronized (_successfulExploratoryPeers) {
_successfulExploratoryPeers.add(peer);

View File

@ -1,9 +1,9 @@
package net.i2p.router.networkdb.kademlia;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
@ -23,33 +23,37 @@ import net.i2p.data.RouterInfo;
import net.i2p.router.ProfileManager;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.router.RouterContext;
class TransientDataStore implements DataStore {
private final static Log _log = new Log(TransientDataStore.class);
private Log _log;
private Map _data; // hash --> DataStructure
protected RouterContext _context;
public TransientDataStore() {
_data = new HashMap(1024);
if (_log.shouldLog(Log.INFO))
_log.info("Data Store initialized");
public TransientDataStore(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(TransientDataStore.class);
_data = new HashMap(1024);
if (_log.shouldLog(Log.INFO))
_log.info("Data Store initialized");
}
public Set getKeys() {
synchronized (_data) {
return new HashSet(_data.keySet());
}
synchronized (_data) {
return new HashSet(_data.keySet());
}
}
public DataStructure get(Hash key) {
synchronized (_data) {
return (DataStructure)_data.get(key);
}
public DataStructure get(Hash key) {
synchronized (_data) {
return (DataStructure)_data.get(key);
}
}
public boolean isKnown(Hash key) {
synchronized (_data) {
return _data.containsKey(key);
}
synchronized (_data) {
return _data.containsKey(key);
}
}
/** nothing published more than 5 minutes in the future */
@ -58,95 +62,95 @@ class TransientDataStore implements DataStore {
private final static long MAX_FUTURE_EXPIRATION_DATE = 3*60*60*1000;
public void put(Hash key, DataStructure data) {
if (data == null) return;
_log.debug("Storing key " + key);
Object old = null;
synchronized (_data) {
old = _data.put(key, data);
}
if (data instanceof RouterInfo) {
ProfileManager.getInstance().heardAbout(key);
RouterInfo ri = (RouterInfo)data;
if (old != null) {
RouterInfo ori = (RouterInfo)old;
if (ri.getPublished() < ori.getPublished()) {
if (_log.shouldLog(Log.INFO))
_log.info("Almost clobbered an old router! " + key + ": [old published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]");
if (_log.shouldLog(Log.DEBUG))
_log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo"));
synchronized (_data) {
_data.put(key, old);
}
} else if (ri.getPublished() > Clock.getInstance().now() + MAX_FUTURE_PUBLISH_DATE) {
if (_log.shouldLog(Log.INFO))
_log.info("Hmm, someone tried to give us something with the publication date really far in the future (" + new Date(ri.getPublished()) + "), dropping it");
if (_log.shouldLog(Log.DEBUG))
_log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo"));
synchronized (_data) {
_data.put(key, old);
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Updated the old router for " + key + ": [old published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]");
if (_log.shouldLog(Log.DEBUG))
_log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo"));
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Brand new router for " + key + ": published on " + new Date(ri.getPublished()));
if (_log.shouldLog(Log.DEBUG))
_log.debug("Number of router options for " + key + ": " + ri.getOptions().size(), new Exception("Updated routerInfo"));
}
} else if (data instanceof LeaseSet) {
LeaseSet ls = (LeaseSet)data;
if (old != null) {
LeaseSet ols = (LeaseSet)old;
if (ls.getEarliestLeaseDate() < ols.getEarliestLeaseDate()) {
if (_log.shouldLog(Log.INFO))
_log.info("Almost clobbered an old leaseSet! " + key + ": [old published on " + new Date(ols.getEarliestLeaseDate()) + " new on " + new Date(ls.getEarliestLeaseDate()) + "]");
synchronized (_data) {
_data.put(key, old);
}
} else if (ls.getEarliestLeaseDate() > Clock.getInstance().now() + MAX_FUTURE_EXPIRATION_DATE) {
if (_log.shouldLog(Log.INFO))
_log.info("Hmm, someone tried to give us something with the expiration date really far in the future (" + new Date(ls.getEarliestLeaseDate()) + "), dropping it");
synchronized (_data) {
_data.put(key, old);
}
}
}
}
if (data == null) return;
_log.debug("Storing key " + key);
Object old = null;
synchronized (_data) {
old = _data.put(key, data);
}
if (data instanceof RouterInfo) {
_context.profileManager().heardAbout(key);
RouterInfo ri = (RouterInfo)data;
if (old != null) {
RouterInfo ori = (RouterInfo)old;
if (ri.getPublished() < ori.getPublished()) {
if (_log.shouldLog(Log.INFO))
_log.info("Almost clobbered an old router! " + key + ": [old published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]");
if (_log.shouldLog(Log.DEBUG))
_log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo"));
synchronized (_data) {
_data.put(key, old);
}
} else if (ri.getPublished() > _context.clock().now() + MAX_FUTURE_PUBLISH_DATE) {
if (_log.shouldLog(Log.INFO))
_log.info("Hmm, someone tried to give us something with the publication date really far in the future (" + new Date(ri.getPublished()) + "), dropping it");
if (_log.shouldLog(Log.DEBUG))
_log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo"));
synchronized (_data) {
_data.put(key, old);
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Updated the old router for " + key + ": [old published on " + new Date(ori.getPublished()) + " new on " + new Date(ri.getPublished()) + "]");
if (_log.shouldLog(Log.DEBUG))
_log.debug("Number of router options for " + key + ": " + ri.getOptions().size() + " (old one had: " + ori.getOptions().size() + ")", new Exception("Updated routerInfo"));
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Brand new router for " + key + ": published on " + new Date(ri.getPublished()));
if (_log.shouldLog(Log.DEBUG))
_log.debug("Number of router options for " + key + ": " + ri.getOptions().size(), new Exception("Updated routerInfo"));
}
} else if (data instanceof LeaseSet) {
LeaseSet ls = (LeaseSet)data;
if (old != null) {
LeaseSet ols = (LeaseSet)old;
if (ls.getEarliestLeaseDate() < ols.getEarliestLeaseDate()) {
if (_log.shouldLog(Log.INFO))
_log.info("Almost clobbered an old leaseSet! " + key + ": [old published on " + new Date(ols.getEarliestLeaseDate()) + " new on " + new Date(ls.getEarliestLeaseDate()) + "]");
synchronized (_data) {
_data.put(key, old);
}
} else if (ls.getEarliestLeaseDate() > _context.clock().now() + MAX_FUTURE_EXPIRATION_DATE) {
if (_log.shouldLog(Log.INFO))
_log.info("Hmm, someone tried to give us something with the expiration date really far in the future (" + new Date(ls.getEarliestLeaseDate()) + "), dropping it");
synchronized (_data) {
_data.put(key, old);
}
}
}
}
}
public int hashCode() {
return DataHelper.hashCode(_data);
public int hashCode() {
return DataHelper.hashCode(_data);
}
public boolean equals(Object obj) {
if ( (obj == null) || (obj.getClass() != getClass()) ) return false;
TransientDataStore ds = (TransientDataStore)obj;
return DataHelper.eq(ds._data, _data);
if ( (obj == null) || (obj.getClass() != getClass()) ) return false;
TransientDataStore ds = (TransientDataStore)obj;
return DataHelper.eq(ds._data, _data);
}
public String toString() {
StringBuffer buf = new StringBuffer();
buf.append("Transient DataStore: ").append(_data.size()).append("\nKeys: ");
Map data = new HashMap();
synchronized (_data) {
data.putAll(_data);
}
for (Iterator iter = data.keySet().iterator(); iter.hasNext();) {
Hash key = (Hash)iter.next();
DataStructure dp = (DataStructure)data.get(key);
buf.append("\n\t*Key: ").append(key.toString()).append("\n\tContent: ").append(dp.toString());
}
buf.append("\n");
return buf.toString();
StringBuffer buf = new StringBuffer();
buf.append("Transient DataStore: ").append(_data.size()).append("\nKeys: ");
Map data = new HashMap();
synchronized (_data) {
data.putAll(_data);
}
for (Iterator iter = data.keySet().iterator(); iter.hasNext();) {
Hash key = (Hash)iter.next();
DataStructure dp = (DataStructure)data.get(key);
buf.append("\n\t*Key: ").append(key.toString()).append("\n\tContent: ").append(dp.toString());
}
buf.append("\n");
return buf.toString();
}
public DataStructure remove(Hash key) {
synchronized (_data) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing key " + key.toBase64());
return (DataStructure)_data.remove(key);
}
synchronized (_data) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing key " + key.toBase64());
return (DataStructure)_data.remove(key);
}
}
}

Some files were not shown because too many files have changed in this diff Show More