2006-07-04 jrandom

    * New NIO-based tcp transport (NTCP), enabled by default for outbound
      connections only.  Those who configure their NAT/firewall to allow
      inbound connections and specify the external host and port
      (dyndns/etc is ok) on /config.jsp can receive inbound connections.
      SSU remains enabled by default for all users as a fallback.
    * Substantial bugfix to the tunnel gateway processing to transfer
      messages sequentially instead of interleaved
    * Renamed GNU/crypto classes to avoid name clashes with kaffe and other
      GNU/Classpath based JVMs
    * Adjust the Fortuna PRNG's pooling system to reduce contention on
      refill with a background thread to refill the output buffer
    * Add per-transport support for the shitlist
    * Add a new async pumped tunnel gateway to reduce tunnel dispatcher
      contention
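
For reference, the host and port entered on /config.jsp are persisted as the i2np.ntcp.hostname and i2np.ntcp.port settings (see ConfigNetHelper below). A minimal sketch of what the config handler does with them; the values here are placeholders:

    // hedged sketch of the update performed by ConfigNetHandler (placeholder values)
    ctx.router().setConfigSetting("i2np.ntcp.hostname", "myrouter.dyndns.example");
    ctx.router().setConfigSetting("i2np.ntcp.port", "12345");
    ctx.router().saveConfig();   // a restart is required for the new address to take effect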
This commit is contained in:
jrandom
2006-07-04 21:17:44 +00:00
committed by zzz
parent 3d07205c9d
commit 208634e5de
50 changed files with 5007 additions and 306 deletions

View File

@ -37,6 +37,8 @@ public class ConfigNetHandler extends FormHandler {
private boolean _requireIntroductions;
private boolean _hiddenMode;
private boolean _dynamicKeys;
private String _ntcpHostname;
private String _ntcpPort;
private String _tcpPort;
private String _udpPort;
private String _inboundRate;
@ -78,6 +80,12 @@ public class ConfigNetHandler extends FormHandler {
public void setTcpPort(String port) {
_tcpPort = (port != null ? port.trim() : null);
}
public void setNtcphost(String host) {
_ntcpHostname = (host != null ? host.trim() : null);
}
public void setNtcpport(String port) {
_ntcpPort = (port != null ? port.trim() : null);
}
public void setUdpPort(String port) {
_udpPort = (port != null ? port.trim() : null);
}
@ -222,6 +230,30 @@ public class ConfigNetHandler extends FormHandler {
restartRequired = true;
}
}
if ( (_ntcpHostname != null) && (_ntcpHostname.length() > 0) && (_ntcpPort != null) && (_ntcpPort.length() > 0) ) {
String oldHost = _context.router().getConfigSetting(ConfigNetHelper.PROP_I2NP_NTCP_HOSTNAME);
String oldPort = _context.router().getConfigSetting(ConfigNetHelper.PROP_I2NP_NTCP_PORT);
if ( (oldHost == null) || (!oldHost.equalsIgnoreCase(_ntcpHostname)) ||
(oldPort == null) || (!oldPort.equalsIgnoreCase(_ntcpPort)) ) {
_context.router().setConfigSetting(ConfigNetHelper.PROP_I2NP_NTCP_HOSTNAME, _ntcpHostname);
_context.router().setConfigSetting(ConfigNetHelper.PROP_I2NP_NTCP_PORT, _ntcpPort);
addFormNotice("Updating inbound TCP settings from " + oldHost + ":" + oldPort
+ " to " + _ntcpHostname + ":" + _ntcpPort);
restartRequired = true;
}
} else {
String oldHost = _context.router().getConfigSetting(ConfigNetHelper.PROP_I2NP_NTCP_HOSTNAME);
String oldPort = _context.router().getConfigSetting(ConfigNetHelper.PROP_I2NP_NTCP_PORT);
if ( (oldHost != null) || (oldPort != null) ) {
_context.router().removeConfigSetting(ConfigNetHelper.PROP_I2NP_NTCP_HOSTNAME);
_context.router().removeConfigSetting(ConfigNetHelper.PROP_I2NP_NTCP_PORT);
addFormNotice("Updating inbound TCP settings from " + oldHost + ":" + oldPort
+ " so that we no longer receive inbound TCP connections");
restartRequired = true;
}
}
if ( (_udpPort != null) && (_udpPort.length() > 0) ) {
String oldPort = _context.router().getConfigSetting(ConfigNetHelper.PROP_I2NP_UDP_PORT);
if ( (oldPort == null) && (_udpPort.equals("8887")) ) {

View File

@ -48,6 +48,10 @@ public class ConfigNetHelper {
}
return "" + port;
}
public final static String PROP_I2NP_NTCP_HOSTNAME = "i2np.ntcp.hostname";
public final static String PROP_I2NP_NTCP_PORT = "i2np.ntcp.port";
public String getNtcphostname() { return _context.getProperty(PROP_I2NP_NTCP_HOSTNAME); }
public String getNtcpport() { return _context.getProperty(PROP_I2NP_NTCP_PORT); }
public String getUdpAddress() {
RouterAddress addr = _context.router().getRouterInfo().getTargetAddress("SSU");

View File

@ -66,6 +66,20 @@
Users behind symmetric NATs, such as OpenBSD's pf, are not currently supported.</p>
<input type="submit" name="recheckReachability" value="Check network reachability..." />
<hr />
<b>Inbound TCP connection configuration:</b><br />
Externally reachable hostname or IP address:
<input name ="ntcphost" type="text" size="16" value="<jsp:getProperty name="nethelper" property="ntcphostname" />" />
(dyndns and the like are fine)<br />
Externally reachable TCP port:
<input name ="ntcpport" type="text" size="6" value="<jsp:getProperty name="nethelper" property="ntcpport" />" /><br />
<p>You do <i>not</i> need to allow inbound TCP connections - outbound connections work with no
configuration. However, if you want to receive inbound TCP connections, you <b>must</b> poke a hole
in your NAT or firewall for unsolicited TCP connections. If you specify the wrong IP address or
hostname, or do not properly configure your NAT or firewall, your network performance will degrade
substantially. When in doubt, leave the hostname and port number blank.</p>
<p><b>Note: changing this setting will terminate all of your connections and effectively
restart your router.</b></p>
<hr />
<b>Dynamic Router Keys: </b>
<input type="checkbox" name="dynamicKeys" value="true" <jsp:getProperty name="nethelper" property="dynamicKeysChecked" /> /><br />
<p>

View File

@ -0,0 +1,198 @@
package gnu.crypto.hash;
// ----------------------------------------------------------------------------
// $Id: BaseHashStandalone.java,v 1.1 2006/02/26 16:30:59 jrandom Exp $
//
// Copyright (C) 2001, 2002, Free Software Foundation, Inc.
//
// This file is part of GNU Crypto.
//
// GNU Crypto is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// GNU Crypto is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to the
//
// Free Software Foundation Inc.,
// 51 Franklin Street, Fifth Floor,
// Boston, MA 02110-1301
// USA
//
// Linking this library statically or dynamically with other modules is
// making a combined work based on this library. Thus, the terms and
// conditions of the GNU General Public License cover the whole
// combination.
//
// As a special exception, the copyright holders of this library give
// you permission to link this library with independent modules to
// produce an executable, regardless of the license terms of these
// independent modules, and to copy and distribute the resulting
// executable under terms of your choice, provided that you also meet,
// for each linked independent module, the terms and conditions of the
// license of that module. An independent module is a module which is
// not derived from or based on this library. If you modify this
// library, you may extend this exception to your version of the
// library, but you are not obligated to do so. If you do not wish to
// do so, delete this exception statement from your version.
// ----------------------------------------------------------------------------
/**
* <p>A base abstract class to facilitate hash implementations.</p>
*
* @version $Revision: 1.1 $
*/
public abstract class BaseHashStandalone implements IMessageDigestStandalone {
// Constants and variables
// -------------------------------------------------------------------------
/** The canonical name prefix of the hash. */
protected String name;
/** The hash (output) size in bytes. */
protected int hashSize;
/** The hash (inner) block size in bytes. */
protected int blockSize;
/** Number of bytes processed so far. */
protected long count;
/** Temporary input buffer. */
protected byte[] buffer;
// Constructor(s)
// -------------------------------------------------------------------------
/**
* <p>Trivial constructor for use by concrete subclasses.</p>
*
* @param name the canonical name prefix of this instance.
* @param hashSize the block size of the output in bytes.
* @param blockSize the block size of the internal transform.
*/
protected BaseHashStandalone(String name, int hashSize, int blockSize) {
super();
this.name = name;
this.hashSize = hashSize;
this.blockSize = blockSize;
this.buffer = new byte[blockSize];
resetContext();
}
// Class methods
// -------------------------------------------------------------------------
// Instance methods
// -------------------------------------------------------------------------
// IMessageDigestStandalone interface implementation ---------------------------------
public String name() {
return name;
}
public int hashSize() {
return hashSize;
}
public int blockSize() {
return blockSize;
}
public void update(byte b) {
// compute number of bytes still unhashed; ie. present in buffer
int i = (int)(count % blockSize);
count++;
buffer[i] = b;
if (i == (blockSize - 1)) {
transform(buffer, 0);
}
}
public void update(byte[] b) {
update(b, 0, b.length);
}
public void update(byte[] b, int offset, int len) {
int n = (int)(count % blockSize);
count += len;
int partLen = blockSize - n;
int i = 0;
if (len >= partLen) {
System.arraycopy(b, offset, buffer, n, partLen);
transform(buffer, 0);
for (i = partLen; i + blockSize - 1 < len; i+= blockSize) {
transform(b, offset + i);
}
n = 0;
}
if (i < len) {
System.arraycopy(b, offset + i, buffer, n, len - i);
}
}
public byte[] digest() {
byte[] tail = padBuffer(); // pad remaining bytes in buffer
update(tail, 0, tail.length); // last transform of a message
byte[] result = getResult(); // make a result out of context
reset(); // reset this instance for future re-use
return result;
}
public void reset() { // reset this instance for future re-use
count = 0L;
for (int i = 0; i < blockSize; ) {
buffer[i++] = 0;
}
resetContext();
}
// methods to be implemented by concrete subclasses ------------------------
public abstract Object clone();
public abstract boolean selfTest();
/**
* <p>Returns the byte array to use as padding before completing a hash
* operation.</p>
*
* @return the bytes to pad the remaining bytes in the buffer before
* completing a hash operation.
*/
protected abstract byte[] padBuffer();
/**
* <p>Constructs the result from the contents of the current context.</p>
*
* @return the output of the completed hash operation.
*/
protected abstract byte[] getResult();
/** Resets the instance for future re-use. */
protected abstract void resetContext();
/**
* <p>The block digest transformation per se.</p>
*
* @param in the <i>blockSize</i> long block, as an array of bytes to digest.
* @param offset the index where the data to digest is located within the
* input buffer.
*/
protected abstract void transform(byte[] in, int offset);
}
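
A minimal, hypothetical subclass sketch (not part of this commit) showing what a concrete digest must supply to plug into BaseHashStandalone; the "toy" compression step and padding are placeholders, not a real hash:

    class ToyHashStandalone extends BaseHashStandalone {
        private int state;
        ToyHashStandalone() { super("toy", 4, 64); }       // 4-byte output, 64-byte blocks
        protected void resetContext() { state = 0; }
        protected void transform(byte[] in, int offset) {
            for (int i = 0; i < blockSize; i++)
                state += in[offset + i];                    // toy compression step
        }
        protected byte[] padBuffer() {
            // pad out to a full block so digest() triggers a final transform
            int n = (int)(count % blockSize);
            return new byte[n == 0 ? blockSize : blockSize - n];
        }
        protected byte[] getResult() {
            return new byte[] { (byte)(state >> 24), (byte)(state >> 16),
                                (byte)(state >> 8), (byte)state };
        }
        public boolean selfTest() { return true; }          // a real digest checks a known test vector
        public Object clone() { return new ToyHashStandalone(); }  // simplified; a real clone copies state
    }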

View File

@ -0,0 +1,141 @@
package gnu.crypto.hash;
// ----------------------------------------------------------------------------
// $Id: IMessageDigestStandalone.java,v 1.1 2006/02/26 16:30:59 jrandom Exp $
//
// Copyright (C) 2001, 2002, Free Software Foundation, Inc.
//
// This file is part of GNU Crypto.
//
// GNU Crypto is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// GNU Crypto is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to the
//
// Free Software Foundation Inc.,
// 51 Franklin Street, Fifth Floor,
// Boston, MA 02110-1301
// USA
//
// Linking this library statically or dynamically with other modules is
// making a combined work based on this library. Thus, the terms and
// conditions of the GNU General Public License cover the whole
// combination.
//
// As a special exception, the copyright holders of this library give
// you permission to link this library with independent modules to
// produce an executable, regardless of the license terms of these
// independent modules, and to copy and distribute the resulting
// executable under terms of your choice, provided that you also meet,
// for each linked independent module, the terms and conditions of the
// license of that module. An independent module is a module which is
// not derived from or based on this library. If you modify this
// library, you may extend this exception to your version of the
// library, but you are not obligated to do so. If you do not wish to
// do so, delete this exception statement from your version.
// ----------------------------------------------------------------------------
/**
* <p>The basic visible methods of any hash algorithm.</p>
*
* <p>A hash (or message digest) algorithm produces its output by iterating a
* basic compression function on blocks of data.</p>
*
* @version $Revision: 1.1 $
*/
public interface IMessageDigestStandalone extends Cloneable {
// Constants
// -------------------------------------------------------------------------
// Methods
// -------------------------------------------------------------------------
/**
* <p>Returns the canonical name of this algorithm.</p>
*
* @return the canonical name of this instance.
*/
String name();
/**
* <p>Returns the output length in bytes of this message digest algorithm.</p>
*
* @return the output length in bytes of this message digest algorithm.
*/
int hashSize();
/**
* <p>Returns the algorithm's (inner) block size in bytes.</p>
*
* @return the algorithm's inner block size in bytes.
*/
int blockSize();
/**
* <p>Continues a message digest operation using the input byte.</p>
*
* @param b the input byte to digest.
*/
void update(byte b);
/**
* <p>Continues a message digest operation, by filling the buffer, processing
* data in the algorithm's HASH_SIZE-bit block(s), updating the context and
* count, and buffering the remaining bytes in buffer for the next
* operation.</p>
*
* @param in the input block.
*/
void update(byte[] in);
/**
* <p>Continues a message digest operation, by filling the buffer, processing
* data in the algorithm's HASH_SIZE-bit block(s), updating the context and
* count, and buffering the remaining bytes in buffer for the next
* operation.</p>
*
* @param in the input block.
* @param offset start of meaningful bytes in input block.
* @param length number of bytes, in input block, to consider.
*/
void update(byte[] in, int offset, int length);
/**
* <p>Completes the message digest by performing final operations such as
* padding and resetting the instance.</p>
*
* @return the array of bytes representing the hash value.
*/
byte[] digest();
/**
* <p>Resets the current context of this instance clearing any eventually cached
* intermediary values.</p>
*/
void reset();
/**
* <p>A basic test. Ensures that the digest of a pre-determined message is equal
* to a known pre-computed value.</p>
*
* @return <tt>true</tt> if the implementation passes a basic self-test.
* Returns <tt>false</tt> otherwise.
*/
boolean selfTest();
/**
* <p>Returns a clone copy of this instance.</p>
*
* @return a clone copy of this instance.
*/
Object clone();
}
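
A short usage sketch, assuming Sha256Standalone (renamed in this commit) as the concrete implementation:

    IMessageDigestStandalone md = new Sha256Standalone();
    byte[] data = "hello world".getBytes();
    md.update(data, 0, data.length);
    byte[] hash = md.digest();   // 32 bytes for SHA-256; also resets the instance for re-use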

View File

@ -1,7 +1,7 @@
package gnu.crypto.hash;
// ----------------------------------------------------------------------------
// $Id: Sha256Standalone.java,v 1.1 2006/02/26 16:30:59 jrandom Exp $
// $Id: Sha256Standalone.java,v 1.2 2006/03/16 16:45:19 jrandom Exp $
//
// Copyright (C) 2003 Free Software Foundation, Inc.
//
@ -59,9 +59,9 @@ package gnu.crypto.hash;
* renamed from Sha256 to avoid conflicts with JVMs using gnu-crypto as their JCE
* provider.
*
* @version $Revision: 1.1 $
* @version $Revision: 1.2 $
*/
public class Sha256Standalone extends BaseHash {
public class Sha256Standalone extends BaseHashStandalone {
// Constants and variables
// -------------------------------------------------------------------------
private static final int[] k = {
@ -143,7 +143,7 @@ public class Sha256Standalone extends BaseHash {
return new Sha256Standalone(this);
}
// Implementation of concrete methods in BaseHash --------------------------
// Implementation of concrete methods in BaseHashStandalone --------------------------
private int transformResult[] = new int[8];
protected void transform(byte[] in, int offset) {

View File

@ -0,0 +1,172 @@
package gnu.crypto.prng;
import java.util.*;
/**
* fortuna instance that tries to avoid blocking if at all possible by using separate
* filled buffer segments rather than one buffer (and blocking when that buffer's data
* has been eaten)
*/
public class AsyncFortunaStandalone extends FortunaStandalone implements Runnable {
private static final int BUFFERS = 16;
private static final int BUFSIZE = 256*1024;
private final byte asyncBuffers[][] = new byte[BUFFERS][BUFSIZE];
private final int status[] = new int[BUFFERS];
private int nextBuf = 0;
private static final int STATUS_NEED_FILL = 0;
private static final int STATUS_FILLING = 1;
private static final int STATUS_FILLED = 2;
private static final int STATUS_LIVE = 3;
public AsyncFortunaStandalone() {
super();
for (int i = 0; i < BUFFERS; i++)
status[i] = STATUS_NEED_FILL;
}
public void startup() {
Thread refillThread = new Thread(this, "PRNG");
refillThread.setDaemon(true);
refillThread.setPriority(Thread.MIN_PRIORITY+1);
refillThread.start();
}
/** the seed is only propagated once the prng is started with startup() */
public void seed(byte val[]) {
Map props = new HashMap(1);
props.put(SEED, (Object)val);
init(props);
//fillBlock();
}
protected void allocBuffer() {}
/**
* make the next available filled buffer current, scheduling any unfilled
* buffers for refill, and blocking until at least one buffer is ready
*/
protected void rotateBuffer() {
synchronized (asyncBuffers) {
// wait until we get some filled
long before = System.currentTimeMillis();
long waited = 0;
while (status[nextBuf] != STATUS_FILLED) {
System.out.println(Thread.currentThread().getName() + ": Next PRNG buffer "
+ nextBuf + " isn't ready (" + status[nextBuf] + ")");
//new Exception("source").printStackTrace();
asyncBuffers.notifyAll();
try {
asyncBuffers.wait();
} catch (InterruptedException ie) {}
waited = System.currentTimeMillis()-before;
}
if (waited > 0)
System.out.println(Thread.currentThread().getName() + ": Took " + waited
+ "ms for a full PRNG buffer to be found");
//System.out.println(Thread.currentThread().getName() + ": Switching to prng buffer " + nextBuf);
buffer = asyncBuffers[nextBuf];
status[nextBuf] = STATUS_LIVE;
int prev=nextBuf-1;
if (prev<0)
prev = BUFFERS-1;
if (status[prev] == STATUS_LIVE)
status[prev] = STATUS_NEED_FILL;
nextBuf++;
if (nextBuf >= BUFFERS)
nextBuf = 0;
asyncBuffers.notify();
}
}
public void run() {
while (true) {
int toFill = -1;
try {
synchronized (asyncBuffers) {
for (int i = 0; i < BUFFERS; i++) {
if (status[i] == STATUS_NEED_FILL) {
status[i] = STATUS_FILLING;
toFill = i;
break;
}
}
if (toFill == -1) {
//System.out.println(Thread.currentThread().getName() + ": All pending buffers full");
asyncBuffers.wait();
}
}
} catch (InterruptedException ie) {}
if (toFill != -1) {
//System.out.println(Thread.currentThread().getName() + ": Filling prng buffer " + toFill);
long before = System.currentTimeMillis();
doFill(asyncBuffers[toFill]);
long after = System.currentTimeMillis();
synchronized (asyncBuffers) {
status[toFill] = STATUS_FILLED;
//System.out.println(Thread.currentThread().getName() + ": Prng buffer " + toFill + " filled after " + (after-before));
asyncBuffers.notifyAll();
}
Thread.yield();
try { Thread.sleep((after-before)*5); } catch (InterruptedException ie) {}
}
}
}
public void fillBlock()
{
rotateBuffer();
}
private void doFill(byte buf[]) {
long start = System.currentTimeMillis();
if (pool0Count >= MIN_POOL_SIZE
&& System.currentTimeMillis() - lastReseed > 100)
{
reseedCount++;
//byte[] seed = new byte[0];
for (int i = 0; i < NUM_POOLS; i++)
{
if (reseedCount % (1 << i) == 0) {
generator.addRandomBytes(pools[i].digest());
}
}
lastReseed = System.currentTimeMillis();
}
generator.nextBytes(buf);
long now = System.currentTimeMillis();
long diff = now-lastRefill;
lastRefill = now;
long refillTime = now-start;
//System.out.println("Refilling " + (++refillCount) + " after " + diff + " for the PRNG took " + refillTime);
}
public static void main(String args[]) {
try {
AsyncFortunaStandalone rand = new AsyncFortunaStandalone();
byte seed[] = new byte[1024];
rand.seed(seed);
System.out.println("Before starting prng");
rand.startup();
System.out.println("Starting prng, waiting 1 minute");
try { Thread.sleep(60*1000); } catch (InterruptedException ie) {}
System.out.println("PRNG started, beginning test");
long before = System.currentTimeMillis();
byte buf[] = new byte[1024];
java.io.ByteArrayOutputStream baos = new java.io.ByteArrayOutputStream();
java.util.zip.GZIPOutputStream gos = new java.util.zip.GZIPOutputStream(baos);
for (int i = 0; i < 1024; i++) {
rand.nextBytes(buf);
gos.write(buf);
}
long after = System.currentTimeMillis();
gos.finish();
byte compressed[] = baos.toByteArray();
System.out.println("Compressed size of 1MB: " + compressed.length + " took " + (after-before));
} catch (Exception e) { e.printStackTrace(); }
try { Thread.sleep(5*60*1000); } catch (InterruptedException ie) {}
}
}
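
A minimal usage sketch, mirroring how FortunaRandomSource wires this class up later in this commit (seed first, then startup() to launch the background refill thread, then read bytes):

    AsyncFortunaStandalone prng = new AsyncFortunaStandalone();
    byte[] seed = new byte[1024];
    new java.security.SecureRandom().nextBytes(seed);   // or a persisted seed file
    prng.seed(seed);
    prng.startup();              // starts the low-priority "PRNG" refill thread
    byte[] out = new byte[64];
    prng.nextBytes(out);         // blocks only while no refilled buffer is ready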

View File

@ -49,7 +49,7 @@ import java.util.Map;
* Modified slightly by jrandom for I2P (removing unneeded exceptions)
* @version $Revision: 1.1 $
*/
public abstract class BasePRNGStandalone implements IRandom {
public abstract class BasePRNGStandalone implements IRandomStandalone {
// Constants and variables
// -------------------------------------------------------------------------
@ -88,7 +88,7 @@ public abstract class BasePRNGStandalone implements IRandom {
// Instance methods
// -------------------------------------------------------------------------
// IRandom interface implementation ----------------------------------------
// IRandomStandalone interface implementation ----------------------------------------
public String name() {
return name;

View File

@ -97,20 +97,22 @@ import net.i2p.crypto.CryptixAESKeyCache;
* gnu-crypto implementation, which has been imported into GNU/classpath
*
*/
public class FortunaStandalone extends BasePRNGStandalone implements Serializable, RandomEventListener
public class FortunaStandalone extends BasePRNGStandalone implements Serializable, RandomEventListenerStandalone
{
private static final long serialVersionUID = 0xFACADE;
private static final int SEED_FILE_SIZE = 64;
private static final int NUM_POOLS = 32;
private static final int MIN_POOL_SIZE = 64;
private final Generator generator;
private final Sha256Standalone[] pools;
private long lastReseed;
private int pool;
private int pool0Count;
private int reseedCount;
static final int NUM_POOLS = 32;
static final int MIN_POOL_SIZE = 64;
final Generator generator;
final Sha256Standalone[] pools;
long lastReseed;
int pool;
int pool0Count;
int reseedCount;
static long refillCount = 0;
static long lastRefill = System.currentTimeMillis();
public static final String SEED = "gnu.crypto.prng.fortuna.seed";
@ -124,6 +126,9 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
lastReseed = 0;
pool = 0;
pool0Count = 0;
allocBuffer();
}
protected void allocBuffer() {
buffer = new byte[4*1024*1024]; //256]; // larger buffer to reduce churn
}
@ -145,6 +150,7 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
public void fillBlock()
{
long start = System.currentTimeMillis();
if (pool0Count >= MIN_POOL_SIZE
&& System.currentTimeMillis() - lastReseed > 100)
{
@ -159,6 +165,11 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
lastReseed = System.currentTimeMillis();
}
generator.nextBytes(buffer);
long now = System.currentTimeMillis();
long diff = now-lastRefill;
lastRefill = now;
long refillTime = now-start;
System.out.println("Refilling " + (++refillCount) + " after " + diff + " for the PRNG took " + refillTime);
}
public void addRandomByte(byte b)
@ -177,7 +188,7 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
pool = (pool + 1) % NUM_POOLS;
}
public void addRandomEvent(RandomEvent event)
public void addRandomEvent(RandomEventStandalone event)
{
if (event.getPoolNumber() < 0 || event.getPoolNumber() >= pools.length)
throw new IllegalArgumentException("pool number out of range: "
@ -338,6 +349,34 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
}
public static void main(String args[]) {
byte in[] = new byte[16];
byte out[] = new byte[16];
byte key[] = new byte[32];
try {
CryptixAESKeyCache.KeyCacheEntry buf = CryptixAESKeyCache.createNew();
Object cryptixKey = CryptixRijndael_Algorithm.makeKey(key, 16, buf);
long beforeAll = System.currentTimeMillis();
for (int i = 0; i < 256; i++) {
//long before =System.currentTimeMillis();
for (int j = 0; j < 1024; j++)
CryptixRijndael_Algorithm.blockEncrypt(in, out, 0, 0, cryptixKey);
//long after = System.currentTimeMillis();
//System.out.println("encrypting 16KB took " + (after-before));
}
long after = System.currentTimeMillis();
System.out.println("encrypting 4MB took " + (after-beforeAll));
} catch (Exception e) { e.printStackTrace(); }
try {
CryptixAESKeyCache.KeyCacheEntry buf = CryptixAESKeyCache.createNew();
Object cryptixKey = CryptixRijndael_Algorithm.makeKey(key, 16, buf);
byte data[] = new byte[4*1024*1024];
long beforeAll = System.currentTimeMillis();
CryptixRijndael_Algorithm.ecbBulkEncrypt(data, data, cryptixKey);
long after = System.currentTimeMillis();
System.out.println("encrypting 4MB took " + (after-beforeAll));
} catch (Exception e) { e.printStackTrace(); }
/*
FortunaStandalone f = new FortunaStandalone();
java.util.HashMap props = new java.util.HashMap();
byte initSeed[] = new byte[1234];
@ -351,5 +390,6 @@ public class FortunaStandalone extends BasePRNGStandalone implements Serializabl
}
long time = System.currentTimeMillis() - before;
System.out.println("512MB took " + time + ", or " + (8*64d)/((double)time/1000d) +"MBps");
*/
}
}

View File

@ -1,7 +1,7 @@
package gnu.crypto.prng;
// ----------------------------------------------------------------------------
// $Id: IRandom.java,v 1.12 2005/10/06 04:24:17 rsdio Exp $
// $Id: IRandomStandalone.java,v 1.1 2005/10/22 13:10:00 jrandom Exp $
//
// Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
//
@ -82,9 +82,9 @@ import java.util.Map;
* Menezes, A., van Oorschot, P. and S. Vanstone.</li>
* </ol>
*
* @version $Revision: 1.12 $
* @version $Revision: 1.1 $
*/
public interface IRandom extends Cloneable {
public interface IRandomStandalone extends Cloneable {
// Constants
// -------------------------------------------------------------------------
@ -110,32 +110,32 @@ public interface IRandom extends Cloneable {
void init(Map attributes);
/**
* <p>Returns the next 8 bits of random data generated from this instance.</p>
*
* @return the next 8 bits of random data generated from this instance.
* @exception IllegalStateException if the instance is not yet initialised.
* @exception LimitReachedException if this instance has reached its
* theoretical limit for generating non-repetitive pseudo-random data.
*/
byte nextByte() throws IllegalStateException, LimitReachedException;
* <p>Returns the next 8 bits of random data generated from this instance.</p>
*
* @return the next 8 bits of random data generated from this instance.
* @exception IllegalStateException if the instance is not yet initialised.
* @exception LimitReachedExceptionStandalone if this instance has reached its
* theoretical limit for generating non-repetitive pseudo-random data.
*/
byte nextByte() throws IllegalStateException, LimitReachedExceptionStandalone;
/**
* <p>Fills the designated byte array, starting from byte at index
* <code>offset</code>, for a maximum of <code>length</code> bytes with the
* output of this generator instance.
*
* @param out the placeholder to contain the generated random bytes.
* @param offset the starting index in <i>out</i> to consider. This method
* does nothing if this parameter is not within <code>0</code> and
* <code>out.length</code>.
* @param length the maximum number of required random bytes. This method
* does nothing if this parameter is less than <code>1</code>.
* @exception IllegalStateException if the instance is not yet initialised.
* @exception LimitReachedException if this instance has reached its
* theoretical limit for generating non-repetitive pseudo-random data.
*/
* <p>Fills the designated byte array, starting from byte at index
* <code>offset</code>, for a maximum of <code>length</code> bytes with the
* output of this generator instance.
*
* @param out the placeholder to contain the generated random bytes.
* @param offset the starting index in <i>out</i> to consider. This method
* does nothing if this parameter is not within <code>0</code> and
* <code>out.length</code>.
* @param length the maximum number of required random bytes. This method
* does nothing if this parameter is less than <code>1</code>.
* @exception IllegalStateException if the instance is not yet initialised.
* @exception LimitReachedExceptionStandalone if this instance has reached its
* theoretical limit for generating non-repetitive pseudo-random data.
*/
void nextBytes(byte[] out, int offset, int length)
throws IllegalStateException, LimitReachedException;
throws IllegalStateException, LimitReachedExceptionStandalone;
/**
* <p>Supplement, or possibly replace, the random state of this PRNG with

View File

@ -1,7 +1,7 @@
package gnu.crypto.prng;
// ----------------------------------------------------------------------------
// $Id: LimitReachedException.java,v 1.5 2005/10/06 04:24:17 rsdio Exp $
// $Id: LimitReachedExceptionStandalone.java,v 1.1 2005/10/22 13:10:00 jrandom Exp $
//
// Copyright (C) 2001, 2002, Free Software Foundation, Inc.
//
@ -47,9 +47,9 @@ package gnu.crypto.prng;
* A checked exception that indicates that a pseudo random number generated has
* reached its theoretical limit in generating random bytes.
*
* @version $Revision: 1.5 $
* @version $Revision: 1.1 $
*/
public class LimitReachedException extends Exception {
public class LimitReachedExceptionStandalone extends Exception {
// Constants and variables
// -------------------------------------------------------------------------
@ -57,11 +57,11 @@ public class LimitReachedException extends Exception {
// Constructor(s)
// -------------------------------------------------------------------------
public LimitReachedException() {
public LimitReachedExceptionStandalone() {
super();
}
public LimitReachedException(String msg) {
public LimitReachedExceptionStandalone(String msg) {
super(msg);
}

View File

@ -1,4 +1,4 @@
/* RandomEventListener.java -- event listener
/* RandomEventListenerStandalone.java -- event listener
Copyright (C) 2004 Free Software Foundation, Inc.
This file is part of GNU Crypto.
@ -47,7 +47,7 @@ import java.util.EventListener;
* An interface for entropy accumulators that will be notified of random
* events.
*/
public interface RandomEventListener extends EventListener
public interface RandomEventListenerStandalone extends EventListener
{
void addRandomEvent(RandomEvent event);
void addRandomEvent(RandomEventStandalone event);
}

View File

@ -1,4 +1,4 @@
/* RandomEvent.java -- a random event.
/* RandomEventStandalone.java -- a random event.
Copyright (C) 2004 Free Software Foundation, Inc.
This file is part of GNU Crypto.
@ -47,14 +47,14 @@ import java.util.EventObject;
* An interface for entropy accumulators that will be notified of random
* events.
*/
public class RandomEvent extends EventObject
public class RandomEventStandalone extends EventObject
{
private final byte sourceNumber;
private final byte poolNumber;
private final byte[] data;
public RandomEvent(Object source, byte sourceNumber, byte poolNumber,
public RandomEventStandalone(Object source, byte sourceNumber, byte poolNumber,
byte[] data)
{
super(source);

View File

@ -320,9 +320,9 @@ public class DHSessionKeyBuilder {
if (_myPrivateValue == null) generateMyValue();
_sessionKey = calculateSessionKey(_myPrivateValue, _peerValue);
} else {
System.err.println("Not ready yet.. privateValue and peerValue must be set ("
+ (_myPrivateValue != null ? "set" : "null") + ","
+ (_peerValue != null ? "set" : "null") + ")");
//System.err.println("Not ready yet.. privateValue and peerValue must be set ("
// + (_myPrivateValue != null ? "set" : "null") + ","
// + (_peerValue != null ? "set" : "null") + ")");
}
return _sessionKey;
}

View File

@ -67,7 +67,7 @@ class TransientSessionKeyManager extends SessionKeyManager {
_log = context.logManager().getLog(TransientSessionKeyManager.class);
_context = context;
_outboundSessions = new HashMap(1024);
_inboundTagSets = new HashMap(64*1024);
_inboundTagSets = new HashMap(1024);
context.statManager().createRateStat("crypto.sessionTagsExpired", "How many tags/sessions are expired?", "Encryption", new long[] { 10*60*1000, 60*60*1000, 3*60*60*1000 });
context.statManager().createRateStat("crypto.sessionTagsRemaining", "How many tags/sessions are remaining after a cleanup?", "Encryption", new long[] { 10*60*1000, 60*60*1000, 3*60*60*1000 });
SimpleTimer.getInstance().addEvent(new CleanupEvent(), 60*1000);

View File

@ -822,6 +822,8 @@ public class DataHelper {
return (ms / (60 * 1000)) + "m";
} else if (ms < 3 * 24 * 60 * 60 * 1000) {
return (ms / (60 * 60 * 1000)) + "h";
} else if (ms > 365L * 24 * 60 * 60 * 1000) { // long constant: the int expression would overflow
return "n/a";
} else {
return (ms / (24 * 60 * 60 * 1000)) + "d";
}

View File

@ -14,7 +14,7 @@ import java.security.SecureRandom;
import net.i2p.I2PAppContext;
import net.i2p.crypto.EntropyHarvester;
import gnu.crypto.prng.FortunaStandalone;
import gnu.crypto.prng.AsyncFortunaStandalone;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
@ -26,13 +26,13 @@ import java.io.IOException;
*
*/
public class FortunaRandomSource extends RandomSource implements EntropyHarvester {
private FortunaStandalone _fortuna;
private AsyncFortunaStandalone _fortuna;
private double _nextGaussian;
private boolean _haveNextGaussian;
public FortunaRandomSource(I2PAppContext context) {
super(context);
_fortuna = new FortunaStandalone();
_fortuna = new AsyncFortunaStandalone();
byte seed[] = new byte[1024];
if (initSeed(seed)) {
_fortuna.seed(seed);
@ -41,6 +41,7 @@ public class FortunaRandomSource extends RandomSource implements EntropyHarveste
sr.nextBytes(seed);
_fortuna.seed(seed);
}
_fortuna.startup();
// kickstart it
_fortuna.nextBytes(seed);
_haveNextGaussian = false;
@ -202,6 +203,13 @@ public class FortunaRandomSource extends RandomSource implements EntropyHarveste
public static void main(String args[]) {
try {
RandomSource rand = I2PAppContext.getGlobalContext().random();
if (true) {
for (int i = 0; i < 1000; i++)
if (rand.nextFloat() < 0)
throw new RuntimeException("negative!");
System.out.println("All positive");
return;
}
java.io.ByteArrayOutputStream baos = new java.io.ByteArrayOutputStream();
java.util.zip.GZIPOutputStream gos = new java.util.zip.GZIPOutputStream(baos);
for (int i = 0; i < 1024*1024; i++) {

View File

@ -48,6 +48,12 @@ public class I2PThread extends Thread {
if ( (_log == null) || (_log.shouldLog(Log.DEBUG)) )
_createdBy = new Exception("Created by");
}
public I2PThread(Runnable r, String name, boolean isDaemon) {
super(r, name);
setDaemon(isDaemon);
if ( (_log == null) || (_log.shouldLog(Log.DEBUG)) )
_createdBy = new Exception("Created by");
}
private void log(int level, String msg) { log(level, msg, null); }
private void log(int level, String msg, Throwable t) {
@ -113,4 +119,4 @@ public class I2PThread extends Thread {
} catch (Throwable tt) { // nop
}
}
}
}

View File

@ -31,14 +31,14 @@ public class SimpleTimer {
_context = I2PAppContext.getGlobalContext();
_log = _context.logManager().getLog(SimpleTimer.class);
_events = new TreeMap();
_eventTimes = new HashMap(1024);
_eventTimes = new HashMap(256);
_readyEvents = new ArrayList(4);
I2PThread runner = new I2PThread(new SimpleTimerRunner());
runner.setName(name);
runner.setDaemon(true);
runner.start();
for (int i = 0; i < 3; i++) {
I2PThread executor = new I2PThread(new Executor());
I2PThread executor = new I2PThread(new Executor(_context, _log, _readyEvents));
executor.setName(name + "Executor " + i);
executor.setDaemon(true);
executor.start();
@ -114,7 +114,7 @@ public class SimpleTimer {
long timeToAdd = System.currentTimeMillis() - now;
if (timeToAdd > 50) {
if (_log.shouldLog(Log.WARN))
_log.warn("timer contention: took " + timeToAdd + "ms to add a job");
_log.warn("timer contention: took " + timeToAdd + "ms to add a job with " + totalEvents + " queued");
}
}
@ -141,14 +141,6 @@ public class SimpleTimer {
public void timeReached();
}
private void log(String msg, Throwable t) {
synchronized (this) {
if (_log == null)
_log = I2PAppContext.getGlobalContext().logManager().getLog(SimpleTimer.class);
}
_log.log(Log.CRIT, msg, t);
}
private long _occurredTime;
private long _occurredEventCount;
private TimedEvent _recentEvents[] = new TimedEvent[5];
@ -228,30 +220,45 @@ public class SimpleTimer {
}
}
}
private class Executor implements Runnable {
public void run() {
while (true) {
TimedEvent evt = null;
synchronized (_readyEvents) {
if (_readyEvents.size() <= 0)
try { _readyEvents.wait(); } catch (InterruptedException ie) {}
if (_readyEvents.size() > 0)
evt = (TimedEvent)_readyEvents.remove(0);
}
if (evt != null) {
long before = _context.clock().now();
try {
evt.timeReached();
} catch (Throwable t) {
log("wtf, event borked: " + evt, t);
}
long time = _context.clock().now() - before;
if ( (time > 1000) && (_log != null) && (_log.shouldLog(Log.WARN)) )
_log.warn("wtf, event execution took " + time + ": " + evt);
}
class Executor implements Runnable {
private I2PAppContext _context;
private Log _log;
private List _readyEvents;
public Executor(I2PAppContext ctx, Log log, List events) {
_context = ctx;
_log = log;
_readyEvents = events;
}
public void run() {
while (true) {
SimpleTimer.TimedEvent evt = null;
synchronized (_readyEvents) {
if (_readyEvents.size() <= 0)
try { _readyEvents.wait(); } catch (InterruptedException ie) {}
if (_readyEvents.size() > 0)
evt = (SimpleTimer.TimedEvent)_readyEvents.remove(0);
}
if (evt != null) {
long before = _context.clock().now();
try {
evt.timeReached();
} catch (Throwable t) {
log("wtf, event borked: " + evt, t);
}
long time = _context.clock().now() - before;
if ( (time > 1000) && (_log != null) && (_log.shouldLog(Log.WARN)) )
_log.warn("wtf, event execution took " + time + ": " + evt);
}
}
}
private void log(String msg, Throwable t) {
synchronized (this) {
if (_log == null)
_log = I2PAppContext.getGlobalContext().logManager().getLog(SimpleTimer.class);
}
_log.log(Log.CRIT, msg, t);
}
}

View File

@ -1,4 +1,20 @@
$Id: history.txt,v 1.490 2006-06-14 00:24:35 cervantes Exp $
$Id: history.txt,v 1.491 2006-07-01 17:44:34 complication Exp $
2006-07-04 jrandom
* New NIO-based tcp transport (NTCP), enabled by default for outbound
connections only. Those who configure their NAT/firewall to allow
inbound connections and specify the external host and port
(dyndns/etc is ok) on /config.jsp can receive inbound connections.
SSU remains enabled by default for all users as a fallback.
* Substantial bugfix to the tunnel gateway processing to transfer
messages sequentially instead of interleaved
* Renamed GNU/crypto classes to avoid name clashes with kaffe and other
GNU/Classpath based JVMs
* Adjust the Fortuna PRNG's pooling system to reduce contention on
refill with a background thread to refill the output buffer
* Add per-transport support for the shitlist
* Add a new async pumped tunnel gateway to reduce tunnel dispatcher
contention
2006-07-01 Complication
* Ensure that the I2PTunnel web interface won't update tunnel settings

View File

@ -48,6 +48,7 @@ public class OutNetMessage {
private MessageSelector _replySelector;
private Set _failedTransports;
private long _sendBegin;
private long _transmitBegin;
private Exception _createdBy;
private long _created;
/** for debugging, contains a mapping of even name to Long (e.g. "begin sending", "handleOutbound", etc) */
@ -57,6 +58,10 @@ public class OutNetMessage {
* (some JVMs have less than 10ms resolution, so the Long above doesn't guarantee order)
*/
private List _timestampOrder;
private int _queueSize;
private long _prepareBegin;
private long _prepareEnd;
private Object _preparationBuf;
public OutNetMessage(RouterContext context) {
_context = context;
@ -148,6 +153,7 @@ public class OutNetMessage {
_messageType = msg.getClass().getName();
_messageTypeId = msg.getType();
_messageId = msg.getUniqueId();
_messageSize = _message.getMessageSize();
}
}
@ -235,9 +241,31 @@ public class OutNetMessage {
/** when did the sending process begin */
public long getSendBegin() { return _sendBegin; }
public void beginSend() { _sendBegin = _context.clock().now(); }
public void beginTransmission() { _transmitBegin = _context.clock().now(); }
public void beginPrepare() { _prepareBegin = _context.clock().now(); }
public void prepared() { prepared(null); }
public void prepared(Object buf) {
_prepareEnd = _context.clock().now();
_preparationBuf = buf;
}
public Object releasePreparationBuffer() {
Object rv = _preparationBuf;
_preparationBuf = null;
return rv;
}
public long getCreated() { return _created; }
/** time since the message was created */
public long getLifetime() { return _context.clock().now() - _created; }
/** time the transport tries to send the message (including any queueing) */
public long getSendTime() { return _context.clock().now() - _sendBegin; }
/** time during which the i2np message is actually in flight */
public long getTransmissionTime() { return _context.clock().now() - _transmitBegin; }
/** how long it took to prepare the i2np message for transmission (including serialization and transport layer encryption) */
public long getPreparationTime() { return _prepareEnd - _prepareBegin; }
/** number of messages ahead of this one going to the targetted peer when it is first enqueued */
public int getQueueSize() { return _queueSize; }
public void setQueueSize(int size) { _queueSize = size; }
/**
* We've done what we need to do with the data from this message, though
@ -245,6 +273,8 @@ public class OutNetMessage {
*/
public void discardData() {
long timeToDiscard = _context.clock().now() - _created;
if ( (_message != null) && (_messageSize <= 0) )
_messageSize = _message.getMessageSize();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Discard " + _messageSize + "byte " + _messageType + " message after "
+ timeToDiscard);
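
A hedged sketch of how a transport might drive the new timing hooks; the helper method and variables below are illustrative, not part of this commit:

    // hypothetical transport-side usage of the new OutNetMessage timing hooks
    void sendSketch(OutNetMessage msg, byte[] wireBytes, int queuedAhead) {
        msg.beginPrepare();
        // ... serialize the I2NP message and apply transport-layer encryption into wireBytes ...
        msg.prepared(wireBytes);             // keep the prepared buffer with the message
        msg.setQueueSize(queuedAhead);       // messages already queued to this peer
        msg.beginSend();                     // includes any queueing time at the transport
        msg.beginTransmission();             // the i2np message is actually going out on the wire
        // later, once the write completes:
        long prep   = msg.getPreparationTime();
        long flight = msg.getTransmissionTime();
    }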

View File

@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
*
*/
public class RouterVersion {
public final static String ID = "$Revision: 1.429 $ $Date: 2006-06-13 21:17:48 $";
public final static String ID = "$Revision: 1.430 $ $Date: 2006-07-01 17:44:36 $";
public final static String VERSION = "0.6.1.21";
public final static long BUILD = 1;
public final static long BUILD = 2;
public static void main(String args[]) {
System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
System.out.println("Router ID: " + RouterVersion.ID);

View File

@ -10,10 +10,8 @@ package net.i2p.router;
import java.io.IOException;
import java.io.Writer;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.*;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.router.peermanager.PeerProfile;
@ -28,29 +26,36 @@ import net.i2p.util.Log;
public class Shitlist {
private Log _log;
private RouterContext _context;
private Map _shitlist; // H(routerIdent) --> Date
private Map _shitlistCause; // H(routerIdent) --> String
private Map _entries;
private class Entry {
/** when it should expire, per the i2p clock */
long expireOn;
/** why they were shitlisted */
String cause;
/** what transports they were shitlisted for (String), or null for all transports */
Set transports;
}
public final static long SHITLIST_DURATION_MS = 4*60*1000; // 4 minute shitlist
public Shitlist(RouterContext context) {
_context = context;
_log = context.logManager().getLog(Shitlist.class);
_shitlist = new HashMap(5);
_shitlistCause = new HashMap(5);
_entries = new HashMap(32);
}
public int getRouterCount() {
purge();
synchronized (_shitlist) {
return _shitlist.size();
synchronized (_entries) {
return _entries.size();
}
}
public boolean shitlistRouter(Hash peer) {
return shitlistRouter(peer, null);
}
public boolean shitlistRouter(Hash peer, String reason) {
public boolean shitlistRouter(Hash peer, String reason) { return shitlistRouter(peer, reason, null); }
public boolean shitlistRouter(Hash peer, String reason, String transport) {
if (peer == null) {
_log.error("wtf, why did we try to shitlist null?", new Exception("shitfaced"));
return false;
@ -73,14 +78,26 @@ public class Shitlist {
if (period > 60*60*1000)
period = 60*60*1000;
synchronized (_shitlist) {
Date oldDate = (Date)_shitlist.put(peer, new Date(_context.clock().now() + period));
wasAlready = (null == oldDate);
if (reason != null) {
if (!wasAlready || (!_shitlistCause.containsKey(peer)) )
_shitlistCause.put(peer, reason);
} else {
_shitlistCause.remove(peer);
Entry e = new Entry();
e.expireOn = _context.clock().now() + period;
e.cause = reason;
e.transports = null;
if (transport != null) {
e.transports = new HashSet(1);
e.transports.add(transport);
}
synchronized (_entries) {
Entry old = (Entry)_entries.put(peer, e);
if (old != null) {
wasAlready = true;
_entries.put(peer, old);
if (e.transports == null) {
old.transports = null;
} else if (old.transports != null) {
old.transports.addAll(e.transports);
}
e = old;
}
}
@ -95,87 +112,89 @@ public class Shitlist {
public void unshitlistRouter(Hash peer) {
unshitlistRouter(peer, true);
}
private void unshitlistRouter(Hash peer, boolean realUnshitlist) {
private void unshitlistRouter(Hash peer, boolean realUnshitlist) { unshitlistRouter(peer, realUnshitlist, null); }
public void unshitlistRouter(Hash peer, String transport) { unshitlistRouter(peer, true, transport); }
private void unshitlistRouter(Hash peer, boolean realUnshitlist, String transport) {
if (peer == null) return;
if (_log.shouldLog(Log.INFO))
_log.info("Unshitlisting router " + peer.toBase64());
synchronized (_shitlist) {
_shitlist.remove(peer);
_shitlistCause.remove(peer);
_log.info("Unshitlisting router " + peer.toBase64()
+ (transport != null ? "/" + transport : ""));
boolean fully = false;
synchronized (_entries) {
Entry e = (Entry)_entries.remove(peer);
if ( (e == null) || (e.transports == null) || (transport == null) || (e.transports.size() <= 1) ) {
// fully unshitlisted
fully = true;
} else {
e.transports.remove(transport);
_entries.put(peer, e);
}
}
if (realUnshitlist) {
if (fully) {
if (realUnshitlist) {
PeerProfile prof = _context.profileOrganizer().getProfile(peer);
if (prof != null)
prof.unshitlist();
}
_context.messageHistory().unshitlist(peer);
}
}
public boolean isShitlisted(Hash peer) { return isShitlisted(peer, null); }
public boolean isShitlisted(Hash peer, String transport) {
boolean rv = false;
boolean unshitlist = false;
synchronized (_entries) {
Entry entry = (Entry)_entries.get(peer);
if (entry == null) {
rv = false;
} else {
if (entry.expireOn <= _context.clock().now()) {
_entries.remove(peer);
unshitlist = true;
rv = false;
} else {
if (entry.transports == null) {
rv = true;
} else if (entry.transports.contains(transport)) {
rv = true;
} else {
rv = false;
}
}
}
}
if (unshitlist) {
PeerProfile prof = _context.profileOrganizer().getProfile(peer);
if (prof != null)
prof.unshitlist();
}
_context.messageHistory().unshitlist(peer);
}
public boolean isShitlisted(Hash peer) {
Date shitlistDate = null;
synchronized (_shitlist) {
shitlistDate = (Date)_shitlist.get(peer);
}
if (shitlistDate == null) return false;
// check validity
if (shitlistDate.getTime() > _context.clock().now()) {
return true;
} else {
unshitlistRouter(peer, false);
return false;
}
}
/**
* We already unshitlist on isShitlisted, but this purge
* lets us get the correct value when rendering the HTML or
* getting the shitlist count. wheee
*
*/
private void purge() {
Map shitlist = null;
synchronized (_shitlist) {
shitlist = new HashMap(_shitlist);
_context.messageHistory().unshitlist(peer);
}
long limit = _context.clock().now();
for (Iterator iter = shitlist.keySet().iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Date shitDate = (Date)shitlist.get(key);
if (shitDate.getTime() < limit) {
unshitlistRouter(key, false);
}
}
return rv;
}
public void renderStatusHTML(Writer out) throws IOException {
StringBuffer buf = new StringBuffer(1024);
buf.append("<h2>Shitlist</h2>");
Map shitlist = null;
Map causes = null;
Map entries = null;
purge();
synchronized (_shitlist) {
shitlist = new HashMap(_shitlist);
causes = new HashMap(_shitlistCause);
synchronized (_entries) {
entries = new HashMap(_entries);
}
buf.append("<ul>");
for (Iterator iter = shitlist.keySet().iterator(); iter.hasNext(); ) {
for (Iterator iter = entries.keySet().iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Date shitDate = (Date)shitlist.get(key);
Entry entry = (Entry)entries.get(key);
buf.append("<li><b>").append(key.toBase64()).append("</b>");
buf.append(" <a href=\"netdb.jsp#").append(key.toBase64().substring(0, 6)).append("\">(?)</a>");
buf.append(" expiring on ");
buf.append(shitDate);
String cause = (String)causes.get(key);
if (cause != null) {
buf.append(" expiring in ");
buf.append(DataHelper.formatDuration(entry.expireOn-_context.clock().now()));
if (entry.cause != null) {
buf.append("<br />\n");
buf.append(cause);
buf.append(entry.cause);
}
buf.append("</li>\n");
}
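
A short usage sketch of the new per-transport signatures; the transport style strings ("NTCP", "SSU") match the styles used elsewhere in this commit, and ctx.shitlist() is assumed to be the router context accessor:

    // shitlist a peer for the NTCP transport only
    ctx.shitlist().shitlistRouter(peer, "connection failed", "NTCP");
    boolean ntcpBlocked = ctx.shitlist().isShitlisted(peer, "NTCP");  // true
    boolean ssuBlocked  = ctx.shitlist().isShitlisted(peer, "SSU");   // false unless shitlisted for all transports
    // remove only the NTCP entry; the peer is fully unshitlisted once no transports remain
    ctx.shitlist().unshitlistRouter(peer, "NTCP");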

View File

@ -159,7 +159,8 @@ class PersistentDataStore extends TransientDataStore {
}
private void write(Hash key, DataStructure data) {
_log.info("Writing key " + key);
if (_log.shouldLog(Log.INFO))
_log.info("Writing key " + key);
FileOutputStream fos = null;
File dbFile = null;
try {

View File

@ -361,6 +361,8 @@ public class ProfileOrganizer {
Hash cur = (Hash)_notFailingPeersList.get(curIndex);
if (matches.contains(cur) ||
(exclude != null && exclude.contains(cur))) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("matched? " + matches.contains(cur) + " exclude: " + exclude + " cur=" + cur.toBase64());
continue;
} else if (onlyNotFailing && _highCapacityPeers.containsKey(cur)) {
// we dont want the good peers, just random ones
@ -368,12 +370,14 @@ public class ProfileOrganizer {
} else {
if (isSelectable(cur))
selected.add(cur);
else if (_log.shouldLog(Log.DEBUG))
_log.debug("Not selectable: " + cur.toBase64());
}
}
}
if (_log.shouldLog(Log.INFO))
_log.info("Selecting all not failing (strict? " + onlyNotFailing + " start=" + start
+ ") found " + selected.size() + " new peers: " + selected);
+ ") found " + selected.size() + " new peers: " + selected + " all=" + _notFailingPeersList.size() + " strict=" + _strictCapacityOrder.size());
matches.addAll(selected);
}
if (matches.size() < howMany) {

View File

@ -21,6 +21,8 @@ import net.i2p.data.RouterAddress;
import net.i2p.router.CommSystemFacade;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.ntcp.NTCPAddress;
import net.i2p.router.transport.ntcp.NTCPTransport;
import net.i2p.router.transport.tcp.TCPTransport;
import net.i2p.util.Log;
@ -89,20 +91,30 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
public Set createAddresses() {
Map addresses = null;
boolean newCreated = false;
if (_manager != null)
if (_manager != null) {
addresses = _manager.getAddresses();
else
} else {
addresses = new HashMap(1);
newCreated = true;
}
if (!addresses.containsKey(TCPTransport.STYLE)) {
RouterAddress addr = createTCPAddress();
if (addr != null)
addresses.put(TCPTransport.STYLE, addr);
}
if (!addresses.containsKey(NTCPTransport.STYLE)) {
RouterAddress addr = createNTCPAddress(_context);
if (_log.shouldLog(Log.INFO))
_log.info("NTCP address: " + addr);
if (addr != null)
addresses.put(NTCPTransport.STYLE, addr);
}
if (_log.shouldLog(Log.INFO))
_log.info("Creating addresses: " + addresses);
_log.info("Creating addresses: " + addresses + " isNew? " + newCreated, new Exception("creator"));
return new HashSet(addresses.values());
}
@ -134,4 +146,41 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
addr.setTransportStyle(TCPTransport.STYLE);
return addr;
}
public final static String PROP_I2NP_NTCP_HOSTNAME = "i2np.ntcp.hostname";
public final static String PROP_I2NP_NTCP_PORT = "i2np.ntcp.port";
public static RouterAddress createNTCPAddress(RouterContext ctx) {
if (!TransportManager.enableNTCP(ctx)) return null;
RouterAddress addr = new RouterAddress();
addr.setCost(10);
addr.setExpiration(null);
Properties props = new Properties();
String name = ctx.router().getConfigSetting(PROP_I2NP_NTCP_HOSTNAME);
String port = ctx.router().getConfigSetting(PROP_I2NP_NTCP_PORT);
/*
boolean isNew = false;
if (name == null) {
name = "localhost";
isNew = true;
}
if (port == null) {
port = String.valueOf(ctx.random().nextInt(10240)+1024);
isNew = true;
}
*/
if ( (name == null) || (port == null) )
return null;
props.setProperty(NTCPAddress.PROP_HOST, name);
props.setProperty(NTCPAddress.PROP_PORT, port);
addr.setOptions(props);
addr.setTransportStyle(NTCPTransport.STYLE);
//if (isNew) {
if (false) return null;
ctx.router().setConfigSetting(PROP_I2NP_NTCP_HOSTNAME, name);
ctx.router().setConfigSetting(PROP_I2NP_NTCP_PORT, port);
ctx.router().saveConfig();
//}
return addr;
}
}

View File

@ -129,13 +129,14 @@ public class FIFOBandwidthLimiter {
* Request some bytes, blocking until they become available
*
*/
public Request requestInbound(int bytesIn, String purpose) {
public Request requestInbound(int bytesIn, String purpose) { return requestInbound(bytesIn, purpose, null, null); }
public Request requestInbound(int bytesIn, String purpose, CompleteListener lsnr, Object attachment) {
if (_inboundUnlimited) {
_totalAllocatedInboundBytes += bytesIn;
return _noop;
}
SimpleRequest req = new SimpleRequest(bytesIn, 0, purpose);
SimpleRequest req = new SimpleRequest(bytesIn, 0, purpose, lsnr, attachment);
requestInbound(req, bytesIn, purpose);
return req;
}
@ -156,13 +157,14 @@ public class FIFOBandwidthLimiter {
* Request some bytes, blocking until they become available
*
*/
public Request requestOutbound(int bytesOut, String purpose) {
public Request requestOutbound(int bytesOut, String purpose) { return requestOutbound(bytesOut, purpose, null, null); }
public Request requestOutbound(int bytesOut, String purpose, CompleteListener lsnr, Object attachment) {
if (_outboundUnlimited) {
_totalAllocatedOutboundBytes += bytesOut;
return _noop;
}
SimpleRequest req = new SimpleRequest(0, bytesOut, purpose);
SimpleRequest req = new SimpleRequest(0, bytesOut, purpose, lsnr, attachment);
requestOutbound(req, bytesOut, purpose);
return req;
}
@ -296,8 +298,8 @@ public class FIFOBandwidthLimiter {
_recvBps = (0.9f)*_recvBps + (0.1f)*((float)recv*1000)/(float)time;
if (_log.shouldLog(Log.WARN)) {
if (_log.shouldLog(Log.INFO))
_log.info("BW: time = " + time + " sent: " + sent + " recv: " + recv);
//if (_log.shouldLog(Log.INFO))
// _log.info("BW: time = " + time + " sent: " + _sendBps + " recv: " + _recvBps);
_context.statManager().getStatLog().addData("bw", "bw.sendBps1s", (long)_sendBps, sent);
_context.statManager().getStatLog().addData("bw", "bw.recvBps1s", (long)_recvBps, recv);
}
@ -305,19 +307,19 @@ public class FIFOBandwidthLimiter {
// Maintain an approximate average with a 15-second halflife
// Weights (0.955 and 0.045) are tuned so that transition between two values (e.g. 0..10)
// would reach their midpoint (e.g. 5) in 15s
if (_sendBps15s <= 0)
_sendBps15s = ((float)sent*15*1000f)/(float)time;
else
//if (_sendBps15s <= 0)
// _sendBps15s = (0.045f)*((float)sent*15*1000f)/(float)time;
//else
_sendBps15s = (0.955f)*_sendBps15s + (0.045f)*((float)sent*1000f)/(float)time;
if (_recvBps15s <= 0)
_recvBps15s = ((float)recv*15*1000f)/(float)time;
else
//if (_recvBps15s <= 0)
// _recvBps15s = (0.045f)*((float)recv*15*1000f)/(float)time;
//else
_recvBps15s = (0.955f)*_recvBps15s + (0.045f)*((float)recv*1000)/(float)time;
if (_log.shouldLog(Log.WARN)) {
if (_log.shouldLog(Log.INFO))
_log.info("BW15: time = " + time + " sent: " + sent + " recv: " + recv);
if (_log.shouldLog(Log.DEBUG))
_log.debug("BW15: time = " + time + " sent: " + _sendBps + " recv: " + _recvBps);
_context.statManager().getStatLog().addData("bw", "bw.sendBps15s", (long)_sendBps15s, sent);
_context.statManager().getStatLog().addData("bw", "bw.recvBps15s", (long)_recvBps15s, recv);
}
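
As an aside, a quick numeric check of the 15-second-halflife comment in the hunk above, under the assumption that updateRates() fires roughly once per second so each update weights the new sample by 0.045: 0.955^15 is about 0.50, i.e. a stale value loses half its weight after roughly 15 one-second updates. A minimal standalone sketch (not part of the commit):

public class HalflifeCheck {
    public static void main(String[] args) {
        double avg = 10.0;     // hypothetical stale average
        double sample = 0.0;   // hypothetical new steady rate
        for (int seconds = 1; seconds <= 15; seconds++)
            avg = 0.955 * avg + 0.045 * sample; // same weights as the limiter
        System.out.println("after 15 updates: " + avg); // ~5.0, halfway to the new rate
    }
}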
@ -432,10 +434,12 @@ public class FIFOBandwidthLimiter {
i--;
continue;
}
if (req.getAllocationsSinceWait() > 0) {
if ( (req.getAllocationsSinceWait() > 0) && (req.getCompleteListener() == null) ) {
// we have already allocated some values to this request, but
// they haven't taken advantage of it yet (most likely they're
// IO bound)
// (also, the complete listener is only set for situations where
// waitForNextAllocation() is never called)
continue;
}
// ok, they are really waiting for us to give them stuff
@ -482,8 +486,8 @@ public class FIFOBandwidthLimiter {
locked_satisfyOutboundAvailable(satisfied);
} else {
// no bandwidth available
if (_log.shouldLog(Log.DEBUG))
_log.debug("Still denying the " + _pendingOutboundRequests.size()
if (_log.shouldLog(Log.INFO))
_log.info("Still denying the " + _pendingOutboundRequests.size()
+ " pending outbound requests (status: " + getStatus().toString()
+ ", longest waited " + locked_getLongestOutboundWait() + " out)");
}
@ -549,6 +553,8 @@ public class FIFOBandwidthLimiter {
// we have already allocated some values to this request, but
// they haven't taken advantage of it yet (most likely they're
// IO bound)
if (_log.shouldLog(Log.WARN))
_log.warn("multiple allocations since wait... ntcp shouldn't do this: " + req.getRequestName());
continue;
}
// ok, they are really waiting for us to give them stuff
@ -563,6 +569,15 @@ public class FIFOBandwidthLimiter {
req.allocateBytes(0, allocated);
satisfied.add(req);
if (req.getPendingOutboundRequested() > 0) {
if (req.attachment() != null) {
if (_log.shouldLog(Log.INFO))
_log.info("Allocating " + allocated + " bytes outbound as a partial grant to "
+ req.getRequestName() + " (wanted "
+ req.getTotalOutboundRequested() + " bytes, waited "
+ waited
+ "ms) pending " + _pendingOutboundRequests.size()
+ ", longest waited " + locked_getLongestOutboundWait() + " out");
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Allocating " + allocated + " bytes outbound as a partial grant to "
+ req.getRequestName() + " (wanted "
@ -571,6 +586,15 @@ public class FIFOBandwidthLimiter {
+ "ms) pending " + _pendingOutboundRequests.size()
+ ", longest waited " + locked_getLongestOutboundWait() + " out");
} else {
if (req.attachment() != null) {
if (_log.shouldLog(Log.INFO))
_log.info("Allocating " + allocated + " bytes outbound to finish the partial grant to "
+ req.getRequestName() + " (total "
+ req.getTotalOutboundRequested() + " bytes, waited "
+ waited
+ "ms) pending " + _pendingOutboundRequests.size()
+ ", longest waited " + locked_getLongestOutboundWait() + " out)");
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Allocating " + allocated + " bytes outbound to finish the partial grant to "
+ req.getRequestName() + " (total "
@ -629,7 +653,10 @@ public class FIFOBandwidthLimiter {
private String _target;
private int _allocationsSinceWait;
private boolean _aborted;
private boolean _waited;
List satisfiedBuffer;
private CompleteListener _lsnr;
private Object _attachment;
public SimpleRequest() {
satisfiedBuffer = new ArrayList(1);
@ -639,7 +666,14 @@ public class FIFOBandwidthLimiter {
satisfiedBuffer = new ArrayList(1);
init(in, out, target);
}
public SimpleRequest(int in, int out, String target, CompleteListener lsnr, Object attachment) {
satisfiedBuffer = new ArrayList(1);
_lsnr = lsnr;
_attachment = attachment;
init(in, out, target);
}
public void init(int in, int out, String target) {
_waited = false;
_inTotal = in;
_outTotal = out;
_inAllocated = 0;
@ -659,36 +693,77 @@ public class FIFOBandwidthLimiter {
public int getPendingInboundRequested() { return _inTotal - _inAllocated; }
public boolean getAborted() { return _aborted; }
public void abort() { _aborted = true; }
public CompleteListener getCompleteListener() { return _lsnr; }
public void setCompleteListener(CompleteListener lsnr) {
boolean complete = false;
synchronized (SimpleRequest.this) {
_lsnr = lsnr;
if (isComplete()) {
complete = true;
}
}
if (complete && lsnr != null) {
if (_log.shouldLog(Log.INFO))
_log.info("complete listener set AND completed: " + lsnr);
lsnr.complete(SimpleRequest.this);
}
}
private boolean isComplete() { return (_outAllocated >= _outTotal) && (_inAllocated >= _inTotal); }
public void waitForNextAllocation() {
_waited = true;
_allocationsSinceWait = 0;
if ( (_outAllocated >= _outTotal) &&
(_inAllocated >= _inTotal) )
return;
boolean complete = false;
try {
synchronized (SimpleRequest.this) {
SimpleRequest.this.wait();
if (isComplete())
complete = true;
else
SimpleRequest.this.wait();
}
} catch (InterruptedException ie) {}
if (complete && _lsnr != null)
_lsnr.complete(SimpleRequest.this);
}
int getAllocationsSinceWait() { return _allocationsSinceWait; }
int getAllocationsSinceWait() { return _waited ? _allocationsSinceWait : 0; }
void allocateAll() {
_inAllocated = _inTotal;
_outAllocated = _outTotal;
_outAllocated = _outTotal;
_allocationsSinceWait++;
if (_lsnr == null)
_allocationsSinceWait++;
if (_log.shouldLog(Log.DEBUG)) _log.debug("allocate all");
notifyAllocation();
}
void allocateBytes(int in, int out) {
_inAllocated += in;
_outAllocated += out;
_allocationsSinceWait++;
if (_lsnr == null)
_allocationsSinceWait++;
if (isComplete()) {
if (_log.shouldLog(Log.INFO))
_log.info("allocate " + in +"/"+ out + " completed, listener=" + _lsnr);
}
//notifyAllocation(); // handled within the satisfy* methods
}
void notifyAllocation() {
boolean complete = false;
synchronized (SimpleRequest.this) {
if (isComplete())
complete = true;
SimpleRequest.this.notifyAll();
}
if (complete && _lsnr != null) {
_lsnr.complete(SimpleRequest.this);
if (_log.shouldLog(Log.INFO))
_log.info("at completion for " + _inTotal + "/" + _outTotal
+ ", recvBps=" + _recvBps + "/"+ _recvBps15s + " listener is " + _lsnr);
}
}
public void attach(Object obj) { _attachment = obj; }
public Object attachment() { return _attachment; }
public String toString() { return getRequestName(); }
}
public interface Request {
@ -712,10 +787,20 @@ public class FIFOBandwidthLimiter {
public boolean getAborted();
/** thar be dragons */
public void init(int in, int out, String target);
public void setCompleteListener(CompleteListener lsnr);
public void attach(Object obj);
public Object attachment();
public CompleteListener getCompleteListener();
}
public interface CompleteListener {
public void complete(Request req);
}
private static final NoopRequest _noop = new NoopRequest();
private static class NoopRequest implements Request {
private CompleteListener _lsnr;
private Object _attachment;
public void abort() {}
public boolean getAborted() { return false; }
public int getPendingInboundRequested() { return 0; }
@ -726,5 +811,12 @@ public class FIFOBandwidthLimiter {
public int getTotalOutboundRequested() { return 0; }
public void waitForNextAllocation() {}
public void init(int in, int out, String target) {}
public CompleteListener getCompleteListener() { return _lsnr; }
public void setCompleteListener(CompleteListener lsnr) {
_lsnr = lsnr;
lsnr.complete(NoopRequest.this);
}
public void attach(Object obj) { _attachment = obj; }
public Object attachment() { return _attachment; }
}
}
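
The Request/CompleteListener/attachment additions above let a caller be notified asynchronously instead of blocking in waitForNextAllocation(). A hypothetical caller sketch (names and flow are illustrative, not taken from the commit), mirroring the pending-count check that EventPumper.wantsWrite() uses and then registering a listener only if the grant is still outstanding:

import net.i2p.router.transport.FIFOBandwidthLimiter;

public class ListenerStyleWrite {
    public void send(FIFOBandwidthLimiter limiter, byte[] data) {
        FIFOBandwidthLimiter.Request req =
            limiter.requestOutbound(data.length, "example write", null, null);
        if (req.getPendingOutboundRequested() <= 0) {
            actuallyWrite(data); // granted (or unlimited) immediately; no callback needed
            return;
        }
        req.attach(data); // remember what we were trying to send
        req.setCompleteListener(new FIFOBandwidthLimiter.CompleteListener() {
            public void complete(FIFOBandwidthLimiter.Request r) {
                actuallyWrite((byte[])r.attachment()); // full grant arrived asynchronously
            }
        });
        // setCompleteListener() fires immediately if the grant completed in the meantime,
        // so the callback is not lost to a race between the check and the registration.
    }
    private void actuallyWrite(byte[] data) { /* push the bytes to the socket / next component */ }
}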

View File

@ -138,8 +138,11 @@ public abstract class TransportImpl implements Transport {
if (_log.shouldLog(Log.WARN))
_log.warn("afterSend slow: [success=" + sendSuccessful + "] " + msg.getMessageSize() + "byte "
+ msg.getMessageType() + " " + msg.getMessageId() + " from "
+ _context.routerHash().toBase64().substring(0,6) + " took " + msToSend);
+ _context.routerHash().toBase64().substring(0,6) + " took " + msToSend
+ "/" + msg.getTransmissionTime());
}
//if (true)
// _log.error("(not error) I2NP message sent? " + sendSuccessful + " " + msg.getMessageId() + " after " + msToSend + "/" + msg.getTransmissionTime());
long lifetime = msg.getLifetime();
if (lifetime > 3000) {
@ -147,7 +150,7 @@ public abstract class TransportImpl implements Transport {
if (!sendSuccessful)
level = Log.INFO;
if (_log.shouldLog(level))
_log.log(level, "afterSend slow (" + lifetime + "): [success=" + sendSuccessful + "]" + msg.getMessageSize() + "byte "
_log.log(level, "afterSend slow (" + lifetime + "/" + msToSend + "/" + msg.getTransmissionTime() + "): [success=" + sendSuccessful + "]" + msg.getMessageSize() + "byte "
+ msg.getMessageType() + " " + msg.getMessageId() + " from " + _context.routerHash().toBase64().substring(0,6)
+ " to " + msg.getTarget().getIdentity().calculateHash().toBase64().substring(0,6) + ": " + msg.toString());
} else {
@ -229,7 +232,7 @@ public abstract class TransportImpl implements Transport {
if (allTime > 5*1000) {
if (_log.shouldLog(Log.INFO))
_log.info("Took too long from preperation to afterSend(ok? " + sendSuccessful
+ "): " + allTime + "ms " + " after failing on: "
+ "): " + allTime + "ms/" + sendTime + "ms after failing on: "
+ msg.getFailedTransports() + " and succeeding on " + getStyle());
if ( (allTime > 60*1000) && (sendSuccessful) ) {
// WTF!!@#
@ -299,6 +302,9 @@ public abstract class TransportImpl implements Transport {
*
*/
public void messageReceived(I2NPMessage inMsg, RouterIdentity remoteIdent, Hash remoteIdentHash, long msToReceive, int bytesReceived) {
//if (true)
// _log.error("(not error) I2NP message received: " + inMsg.getUniqueId() + " after " + msToReceive);
int level = Log.INFO;
if (msToReceive > 5000)
level = Log.WARN;
@ -370,11 +376,18 @@ public abstract class TransportImpl implements Transport {
public void recheckReachability() {}
public static boolean isPubliclyRoutable(byte addr[]) {
if ((addr[0]&0xFF) == 127) return false;
if ((addr[0]&0xFF) == 10) return false;
if ( ((addr[0]&0xFF) == 172) && ((addr[1]&0xFF) >= 16) && ((addr[1]&0xFF) <= 31) ) return false;
if ( ((addr[0]&0xFF) == 192) && ((addr[1]&0xFF) == 168) ) return false;
if ((addr[0]&0xFF) >= 224) return false; // no multicast
return true; // or at least possible to be true
if (addr.length == 4) {
if ((addr[0]&0xFF) == 127) return false;
if ((addr[0]&0xFF) == 10) return false;
if ( ((addr[0]&0xFF) == 172) && ((addr[1]&0xFF) >= 16) && ((addr[1]&0xFF) <= 31) ) return false;
if ( ((addr[0]&0xFF) == 192) && ((addr[1]&0xFF) == 168) ) return false;
if ((addr[0]&0xFF) >= 224) return false; // no multicast
return true; // or at least possible to be true
} else if (addr.length == 16) {
return false;
} else {
// unknown address length - treat as not routable
return false;
}
}
}
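
A small throwaway check (not part of the commit) of the revised isPubliclyRoutable() behavior: loopback, RFC 1918 and multicast IPv4 ranges are rejected, any 16-byte (IPv6) address is rejected outright, and other 4-byte addresses are treated as possibly routable. The sample addresses below are illustrative only:

import java.net.InetAddress;
import net.i2p.router.transport.TransportImpl;

public class RoutableCheck {
    public static void main(String[] args) throws Exception {
        String hosts[] = { "127.0.0.1", "10.0.0.1", "172.16.0.1", "192.168.1.1", "224.0.0.1", "198.51.100.7" };
        for (int i = 0; i < hosts.length; i++) {
            byte addr[] = InetAddress.getByName(hosts[i]).getAddress();
            System.out.println(hosts[i] + " routable? " + TransportImpl.isPubliclyRoutable(addr));
        }
        // a 16-byte address is always reported as non-routable by this version
        byte v6[] = InetAddress.getByName("::1").getAddress();
        System.out.println("::1 routable? " + TransportImpl.isPubliclyRoutable(v6));
    }
}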

View File

@ -27,6 +27,7 @@ import net.i2p.router.CommSystemFacade;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.tcp.TCPTransport;
import net.i2p.router.transport.udp.UDPTransport;
import net.i2p.router.transport.ntcp.NTCPTransport;
import net.i2p.util.Log;
public class TransportManager implements TransportEventListener {
@ -36,7 +37,9 @@ public class TransportManager implements TransportEventListener {
private final static String PROP_DISABLE_TCP = "i2np.tcp.disable";
private final static String PROP_ENABLE_UDP = "i2np.udp.enable";
private static final String DEFAULT_ENABLE_UDP = "true";
private final static String PROP_ENABLE_NTCP = "i2np.ntcp.enable";
private final static String DEFAULT_ENABLE_NTCP = "true";
private final static String DEFAULT_ENABLE_UDP = "true";
public TransportManager(RouterContext context) {
_context = context;
@ -76,6 +79,17 @@ public class TransportManager implements TransportEventListener {
udp.setListener(this);
_transports.add(udp);
}
if (enableNTCP(_context)) {
NTCPTransport ntcp = new NTCPTransport(_context);
ntcp.setListener(this);
_transports.add(ntcp);
}
static boolean enableNTCP(RouterContext ctx) {
String enableNTCP = ctx.router().getConfigSetting(PROP_ENABLE_NTCP);
if (enableNTCP == null)
enableNTCP = DEFAULT_ENABLE_NTCP;
return "true".equalsIgnoreCase(enableNTCP);
}
public void startListening() {
@ -92,7 +106,7 @@ public class TransportManager implements TransportEventListener {
public void restart() {
stopListening();
try { Thread.sleep(1*1000); } catch (InterruptedException ie) {}
try { Thread.sleep(5*1000); } catch (InterruptedException ie) {}
startListening();
}
@ -260,6 +274,8 @@ public class TransportManager implements TransportEventListener {
Transport t = (Transport)_transports.get(i);
if (t.getCurrentAddress() != null)
buf.append(t.getCurrentAddress()).append("\n\n");
else
buf.append(t.getStyle()).append(" is used for outbound connections only");
}
buf.append("</pre>\n");
out.write(buf.toString());
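
Per the enableNTCP() check added above, the transport is on by default and controlled by the i2np.ntcp.enable property. A hypothetical helper (not in the commit) that turns it off using the same router config API this changeset uses elsewhere; the change takes effect the next time the transports are set up:

import net.i2p.router.RouterContext;

public class DisableNtcp {
    /** Persist i2np.ntcp.enable=false so the NTCP transport is skipped when transports are next configured. */
    public static void disable(RouterContext ctx) {
        ctx.router().setConfigSetting("i2np.ntcp.enable", "false");
        ctx.router().saveConfig();
    }
}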

View File

@ -0,0 +1,851 @@
package net.i2p.router.transport.ntcp;
import java.io.*;
import java.net.InetAddress;
import java.net.Socket;
import java.net.UnknownHostException;
import java.nio.channels.SelectionKey;
import net.i2p.I2PAppContext;
import net.i2p.crypto.DHSessionKeyBuilder;
import net.i2p.data.*;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
import java.nio.ByteBuffer;
/*
* Alice contacts Bob
* =========================================================
* X+(H(X) xor Bob.identHash)----------------------------->
* <----------------------------------------Y+E(H(X+Y)+tsB, sk, Y[239:255])
* E(#+Alice.identity+tsA+padding+S(X+Y+Bob.identHash+tsA+tsB+padding), sk, hX_xor_Bob.identHash[16:31])--->
* <----------------------E(S(X+Y+Alice.identHash+tsA+tsB)+padding, sk, prev)
*
* Alternatively, when Bob receives a connection, it could be a
* check connection (perhaps prompted by Bob asking for someone
* to verify his listener).  Check connections are formatted per
* {@link #isCheckInfo()}
*/
public class EstablishState {
private RouterContext _context;
private Log _log;
// bob receives (and alice sends)
private byte _X[];
private byte _hX_xor_bobIdentHash[];
private int _aliceIdentSize;
/** contains the decrypted aliceIdentSize + aliceIdent + tsA + padding + aliceSig */
private ByteArrayOutputStream _sz_aliceIdent_tsA_padding_aliceSig;
/** how long we expect _sz_aliceIdent_tsA_padding_aliceSig to be when it's full */
private int _sz_aliceIdent_tsA_padding_aliceSigSize;
// alice receives (and bob sends)
private byte _Y[];
private transient byte _e_hXY_tsB[];
private transient long _tsB;
private transient long _tsA;
private transient byte _e_bobSig[];
/** previously received encrypted block (or the IV) */
private byte _prevEncrypted[];
/** current encrypted block we are reading */
private byte _curEncrypted[];
/**
* next index in _curEncrypted to write to (equals _curEncrypted length if the block is
* ready to decrypt)
*/
private int _curEncryptedOffset;
/** decryption buffer */
private byte _curDecrypted[];
/** bytes received so far */
private int _received;
/** bytes sent so far */
private int _sent;
private byte _extra[];
private DHSessionKeyBuilder _dh;
private NTCPTransport _transport;
private NTCPConnection _con;
private boolean _corrupt;
/** error causing the corruption */
private String _err;
/** exception causing the error */
private Exception _e;
private boolean _verified;
private boolean _confirmWritten;
public EstablishState(RouterContext ctx, NTCPTransport transport, NTCPConnection con) {
_context = ctx;
_log = ctx.logManager().getLog(getClass());
_transport = transport;
_con = con;
_verified = false;
_corrupt = false;
_confirmWritten = false;
_dh = new DHSessionKeyBuilder();
if (_con.isInbound()) {
_X = new byte[256];
_hX_xor_bobIdentHash = new byte[Hash.HASH_LENGTH];
_sz_aliceIdent_tsA_padding_aliceSig = new ByteArrayOutputStream(512);
} else {
_X = _dh.getMyPublicValueBytes();
_Y = new byte[256];
_hX_xor_bobIdentHash = new byte[Hash.HASH_LENGTH];
byte hx[] = ctx.sha().calculateHash(_X).getData();
DataHelper.xor(hx, 0, con.getRemotePeer().calculateHash().getData(), 0, _hX_xor_bobIdentHash, 0, hx.length);
}
_prevEncrypted = new byte[16];
_curEncrypted = new byte[16];
_curEncryptedOffset = 0;
_curDecrypted = new byte[16];
_received = 0;
}
/**
* parse the contents of the buffer as part of the handshake. if the
* handshake is completed and there is more data remaining, the buffer is
* updated so that the next read will be the (still encrypted) remaining
* data (available from getExtraBytes)
*/
public void receive(ByteBuffer src) {
if (_corrupt || _verified)
throw new IllegalStateException(prefix() + "received after completion [corrupt?" + _corrupt + " verified? " + _verified + "] on " + _con);
if (!src.hasRemaining())
return; // nothing to receive
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"receive " + src);
if (_con.isInbound())
receiveInbound(src);
else
receiveOutbound(src);
}
/**
* we have written all of the data required to confirm the connection
* establishment
*/
public boolean confirmWritten() { return _confirmWritten; }
/** we are Bob, so receive these bytes as part of an inbound connection */
private void receiveInbound(ByteBuffer src) {
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"Receiving inbound: prev received=" + _received + " src.remaining=" + src.remaining());
while (_received < _X.length && src.hasRemaining()) {
byte c = src.get();
_X[_received++] = c;
//if (_log.shouldLog(Log.DEBUG)) _log.debug("recv x" + (int)c + " received=" + _received);
if (_received >= _X.length) {
if (isCheckInfo(_context, _context.routerHash(), _X)) {
fail("Incoming connection was a check connection");
return;
}
}
}
while (_received < _X.length + _hX_xor_bobIdentHash.length && src.hasRemaining()) {
int i = _received-_X.length;
_received++;
byte c = src.get();
_hX_xor_bobIdentHash[i] = c;
//if (_log.shouldLog(Log.DEBUG)) _log.debug("recv bih" + (int)c + " received=" + _received);
}
if (_received >= _X.length + _hX_xor_bobIdentHash.length) {
if (_dh.getSessionKey() == null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"Enough data for a DH received");
// first verify that Alice knows who she is trying to talk with and that the X
// isn't corrupt
Hash hX = _context.sha().calculateHash(_X);
byte realXor[] = DataHelper.xor(hX.getData(), _context.routerHash().getData());
if (_log.shouldLog(Log.DEBUG)) {
//_log.debug(prefix()+"_X = " + Base64.encode(_X));
_log.debug(prefix()+"hx = " + Base64.encode(hX.getData()));
_log.debug(prefix()+"bih=" + Base64.encode(_context.routerHash().getData()));
_log.debug(prefix()+"xor=" + Base64.encode(realXor));
}
if (!DataHelper.eq(realXor, _hX_xor_bobIdentHash)) {
fail("Invalid hX_xor");
return;
}
try {
// ok, they're actually trying to talk to us, and we got their (unauthenticated) X
_dh.setPeerPublicValue(_X);
_dh.getSessionKey(); // force the calc
System.arraycopy(realXor, 16, _prevEncrypted, 0, _prevEncrypted.length);
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"DH session key calculated (" + _dh.getSessionKey().toBase64() + ")");
// now prepare our response: Y+E(H(X+Y)+tsB+padding, sk, Y[239:255])
_Y = _dh.getMyPublicValueBytes();
byte xy[] = new byte[_X.length+_Y.length];
System.arraycopy(_X, 0, xy, 0, _X.length);
System.arraycopy(_Y, 0, xy, _X.length, _Y.length);
Hash hxy = _context.sha().calculateHash(xy);
_tsB = _context.clock().now()/1000l;
byte padding[] = new byte[12]; // the encrypted data needs an extra 12 bytes
_context.random().nextBytes(padding);
byte toEncrypt[] = new byte[hxy.getData().length+4+padding.length];
System.arraycopy(hxy.getData(), 0, toEncrypt, 0, hxy.getData().length);
byte tsB[] = DataHelper.toLong(4, _tsB);
System.arraycopy(tsB, 0, toEncrypt, hxy.getData().length, tsB.length);
//DataHelper.toLong(toEncrypt, hxy.getData().length, 4, _tsB);
System.arraycopy(padding, 0,toEncrypt, hxy.getData().length+4, padding.length);
if (_log.shouldLog(Log.DEBUG)) {
//_log.debug(prefix()+"Y="+Base64.encode(_Y));
//_log.debug(prefix()+"x+y="+Base64.encode(xy));
_log.debug(prefix()+"h(x+y)="+Base64.encode(hxy.getData()));
_log.debug(prefix()+"tsb="+Base64.encode(tsB));
_log.debug(prefix()+"unencrypted H(X+Y)+tsB+padding: " + Base64.encode(toEncrypt));
_log.debug(prefix()+"encryption iv= " + Base64.encode(_Y, _Y.length-16, 16));
_log.debug(prefix()+"encryption key= " + _dh.getSessionKey().toBase64());
}
_e_hXY_tsB = new byte[toEncrypt.length];
_context.aes().encrypt(toEncrypt, 0, _e_hXY_tsB, 0, _dh.getSessionKey(), _Y, _Y.length-16, toEncrypt.length);
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"encrypted H(X+Y)+tsB+padding: " + Base64.encode(_e_hXY_tsB));
byte write[] = new byte[_Y.length + _e_hXY_tsB.length];
System.arraycopy(_Y, 0, write, 0, _Y.length);
System.arraycopy(_e_hXY_tsB, 0, write, _Y.length, _e_hXY_tsB.length);
// ok, now that is prepared, we want to actually send it, so make sure we are up for writing
_transport.getPumper().wantsWrite(_con, write);
if (!src.hasRemaining()) return;
} catch (DHSessionKeyBuilder.InvalidPublicParameterException e) {
fail("Invalid X", e);
return;
}
}
// ok, we are onto the encrypted area
while (src.hasRemaining() && !_corrupt) {
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"Encrypted bytes available (" + src.hasRemaining() + ")");
while (_curEncryptedOffset < _curEncrypted.length && src.hasRemaining()) {
_curEncrypted[_curEncryptedOffset++] = src.get();
_received++;
}
if (_curEncryptedOffset >= _curEncrypted.length) {
_context.aes().decrypt(_curEncrypted, 0, _curDecrypted, 0, _dh.getSessionKey(), _prevEncrypted, 0, _curEncrypted.length);
//if (_log.shouldLog(Log.DEBUG))
// _log.debug(prefix()+"full block read and decrypted: " + Base64.encode(_curDecrypted));
byte swap[] = new byte[16];
_prevEncrypted = _curEncrypted;
_curEncrypted = swap;
_curEncryptedOffset = 0;
if (_aliceIdentSize <= 0) { // we are on the first decrypted block
_aliceIdentSize = (int)DataHelper.fromLong(_curDecrypted, 0, 2);
_sz_aliceIdent_tsA_padding_aliceSigSize = 2 + _aliceIdentSize + 4 + Signature.SIGNATURE_BYTES;
int rem = (_sz_aliceIdent_tsA_padding_aliceSigSize % 16);
int padding = 0;
if (rem > 0)
padding = 16-rem;
_sz_aliceIdent_tsA_padding_aliceSigSize += padding;
try {
_sz_aliceIdent_tsA_padding_aliceSig.write(_curDecrypted);
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR)) _log.error(prefix()+"Error writing to the baos?", ioe);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"alice ident size decrypted as " + _aliceIdentSize + ", making the padding at " + padding + " and total size at " + _sz_aliceIdent_tsA_padding_aliceSigSize);
} else {
// subsequent block...
try {
_sz_aliceIdent_tsA_padding_aliceSig.write(_curDecrypted);
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR)) _log.error(prefix()+"Error writing to the baos?", ioe);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"subsequent block decrypted (" + _sz_aliceIdent_tsA_padding_aliceSig.size() + ")");
if (_sz_aliceIdent_tsA_padding_aliceSig.size() >= _sz_aliceIdent_tsA_padding_aliceSigSize) {
verifyInbound();
if (!_corrupt && _verified && src.hasRemaining())
prepareExtra(src);
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"verifying size (sz=" + _sz_aliceIdent_tsA_padding_aliceSig.size()
+ " expected=" + _sz_aliceIdent_tsA_padding_aliceSigSize
+ " corrupt=" + _corrupt
+ " verified=" + _verified + " extra=" + (_extra != null ? _extra.length : 0) + ")");
return;
}
}
} else {
// no more bytes available in the buffer, and only a partial
// block was read, so we can't decrypt it.
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"end of available data with only a partial block read (" + _curEncryptedOffset + ", " + _received + ")");
}
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"done with the data, not yet complete or corrupt");
}
}
/** we are Alice, so receive these bytes as part of an outbound connection */
private void receiveOutbound(ByteBuffer src) {
if (_log.shouldLog(Log.DEBUG)) _log.debug(prefix()+"Receive outbound " + src + " received=" + _received);
// recv Y+E(H(X+Y)+tsB, sk, Y[239:255])
while (_received < _Y.length && src.hasRemaining()) {
byte c = src.get();
_Y[_received++] = c;
//if (_log.shouldLog(Log.DEBUG)) _log.debug("recv x" + (int)c + " received=" + _received);
if (_received >= _Y.length) {
try {
_dh.setPeerPublicValue(_Y);
_dh.getSessionKey(); // force the calc
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"DH session key calculated (" + _dh.getSessionKey().toBase64() + ")");
_e_hXY_tsB = new byte[Hash.HASH_LENGTH+4+12];
} catch (DHSessionKeyBuilder.InvalidPublicParameterException e) {
fail("Invalid X", e);
return;
}
}
}
while (_received < _Y.length + _e_hXY_tsB.length && src.hasRemaining()) {
int i = _received-_Y.length;
_received++;
byte c = src.get();
_e_hXY_tsB[i] = c;
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "recv _e_hXY_tsB " + (int)c + " received=" + _received);
if (i+1 >= _e_hXY_tsB.length) {
if (_log.shouldLog(Log.DEBUG)) _log.debug(prefix() + "received _e_hXY_tsB fully");
byte hXY_tsB[] = new byte[_e_hXY_tsB.length];
_context.aes().decrypt(_e_hXY_tsB, 0, hXY_tsB, 0, _dh.getSessionKey(), _Y, _Y.length-16, _e_hXY_tsB.length);
byte XY[] = new byte[_X.length + _Y.length];
System.arraycopy(_X, 0, XY, 0, _X.length);
System.arraycopy(_Y, 0, XY, _X.length, _Y.length);
Hash h = _context.sha().calculateHash(XY);
if (_log.shouldLog(Log.DEBUG)) _log.debug(prefix() + "h(XY)=" + h.toBase64());
if (!DataHelper.eq(h.getData(), 0, hXY_tsB, 0, Hash.HASH_LENGTH)) {
fail("Invalid H(X+Y) - mitm attack attempted?");
return;
}
_tsB = DataHelper.fromLong(hXY_tsB, Hash.HASH_LENGTH, 4);
_tsA = _context.clock().now()/1000;
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"h(X+Y) is correct, tsA-tsB=" + (_tsA-_tsB));
// now prepare and send our response
// send E(#+Alice.identity+tsA+padding+S(X+Y+Bob.identHash+tsA+tsB), sk, hX_xor_Bob.identHash[16:31])
int sigSize = _X.length+_Y.length+Hash.HASH_LENGTH+4+4;//+12;
byte preSign[] = new byte[sigSize];
System.arraycopy(_X, 0, preSign, 0, _X.length);
System.arraycopy(_Y, 0, preSign, _X.length, _Y.length);
System.arraycopy(_con.getRemotePeer().calculateHash().getData(), 0, preSign, _X.length+_Y.length, Hash.HASH_LENGTH);
DataHelper.toLong(preSign, _X.length+_Y.length+Hash.HASH_LENGTH, 4, _tsA);
DataHelper.toLong(preSign, _X.length+_Y.length+Hash.HASH_LENGTH+4, 4, _tsB);
// hXY_tsB has 12 bytes of padding (size=48, tsB=4 + hXY=32)
//System.arraycopy(hXY_tsB, hXY_tsB.length-12, preSign, _X.length+_Y.length+Hash.HASH_LENGTH+4+4, 12);
//byte sigPad[] = new byte[padSig];
//_context.random().nextBytes(sigPad);
//System.arraycopy(sigPad, 0, preSign, _X.length+_Y.length+Hash.HASH_LENGTH+4+4, padSig);
Signature sig = _context.dsa().sign(preSign, _context.keyManager().getSigningPrivateKey());
//if (_log.shouldLog(Log.DEBUG)) {
// _log.debug(prefix()+"signing " + Base64.encode(preSign));
//}
byte ident[] = _context.router().getRouterInfo().getIdentity().toByteArray();
int min = 2+ident.length+4+Signature.SIGNATURE_BYTES;
int rem = min % 16;
int padding = 0;
if (rem > 0)
padding = 16 - rem;
byte preEncrypt[] = new byte[min+padding];
DataHelper.toLong(preEncrypt, 0, 2, ident.length);
System.arraycopy(ident, 0, preEncrypt, 2, ident.length);
DataHelper.toLong(preEncrypt, 2+ident.length, 4, _tsA);
byte pad[] = new byte[padding];
_context.random().nextBytes(pad);
System.arraycopy(pad, 0, preEncrypt, 2+ident.length+4, padding);
System.arraycopy(sig.getData(), 0, preEncrypt, 2+ident.length+4+padding, Signature.SIGNATURE_BYTES);
_prevEncrypted = new byte[preEncrypt.length];
_context.aes().encrypt(preEncrypt, 0, _prevEncrypted, 0, _dh.getSessionKey(), _hX_xor_bobIdentHash, _hX_xor_bobIdentHash.length-16, preEncrypt.length);
if (_log.shouldLog(Log.DEBUG)) {
//_log.debug(prefix() + "unencrypted response to Bob: " + Base64.encode(preEncrypt));
//_log.debug(prefix() + "encrypted response to Bob: " + Base64.encode(_prevEncrypted));
}
// send 'er off (when the bw limiter says, etc)
_transport.getPumper().wantsWrite(_con, _prevEncrypted);
}
}
if (_received >= _Y.length + _e_hXY_tsB.length && src.hasRemaining()) {
// we are receiving their confirmation
// recv E(S(X+Y+Alice.identHash+tsA+tsB)+padding, sk, prev)
int off = 0;
if (_e_bobSig == null) {
_e_bobSig = new byte[48];
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "receiving E(S(X+Y+Alice.identHash+tsA+tsB)+padding, sk, prev) (remaining? " + src.hasRemaining() + ")");
} else {
off = _received - _Y.length - _e_hXY_tsB.length;
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "continuing to receive E(S(X+Y+Alice.identHash+tsA+tsB)+padding, sk, prev) (remaining? " + src.hasRemaining() + " off=" + off + " recv=" + _received + ")");
}
while (src.hasRemaining() && off < _e_bobSig.length) {
if (_log.shouldLog(Log.DEBUG)) _log.debug(prefix()+"recv bobSig received=" + _received);
_e_bobSig[off++] = src.get();
_received++;
if (off >= _e_bobSig.length) {
//if (_log.shouldLog(Log.DEBUG))
// _log.debug(prefix() + "received E(S(X+Y+Alice.identHash+tsA+tsB)+padding, sk, prev): " + Base64.encode(_e_bobSig));
byte bobSig[] = new byte[_e_bobSig.length];
_context.aes().decrypt(_e_bobSig, 0, bobSig, 0, _dh.getSessionKey(), _e_hXY_tsB, _e_hXY_tsB.length-16, _e_bobSig.length);
// ignore the padding
byte bobSigData[] = new byte[Signature.SIGNATURE_BYTES];
System.arraycopy(bobSig, 0, bobSigData, 0, Signature.SIGNATURE_BYTES);
Signature sig = new Signature(bobSigData);
byte toVerify[] = new byte[_X.length+_Y.length+Hash.HASH_LENGTH+4+4];
int voff = 0;
System.arraycopy(_X, 0, toVerify, voff, _X.length); voff += _X.length;
System.arraycopy(_Y, 0, toVerify, voff, _Y.length); voff += _Y.length;
System.arraycopy(_context.routerHash().getData(), 0, toVerify, voff, Hash.HASH_LENGTH); voff += Hash.HASH_LENGTH;
DataHelper.toLong(toVerify, voff, 4, _tsA); voff += 4;
DataHelper.toLong(toVerify, voff, 4, _tsB); voff += 4;
_verified = _context.dsa().verifySignature(sig, toVerify, _con.getRemotePeer().getSigningPublicKey());
if (!_verified) {
fail("Signature was invalid - attempt to spoof " + _con.getRemotePeer().calculateHash().toBase64() + "?");
return;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "signature verified from Bob. done!");
prepareExtra(src);
byte nextWriteIV[] = new byte[16];
System.arraycopy(_prevEncrypted, _prevEncrypted.length-16, nextWriteIV, 0, 16);
byte nextReadIV[] = new byte[16];
System.arraycopy(_e_bobSig, _e_bobSig.length-16, nextReadIV, 0, nextReadIV.length);
_con.finishOutboundEstablishment(_dh.getSessionKey(), 1000*(_tsA-_tsB), nextWriteIV, nextReadIV);
return;
}
}
}
}
}
/** did the handshake fail for some reason? */
public boolean isCorrupt() { return _err != null; }
/** @return is the handshake complete and valid? */
public boolean isComplete() { return _verified; }
/**
* we are establishing an outbound connection, so prepare ourselves by
* queueing up the write of the first part of the handshake
*/
public void prepareOutbound() {
if (_received <= 0) {
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "write out the first part of our handshake");
byte toWrite[] = new byte[_X.length + _hX_xor_bobIdentHash.length];
System.arraycopy(_X, 0, toWrite, 0, _X.length);
System.arraycopy(_hX_xor_bobIdentHash, 0, toWrite, _X.length, _hX_xor_bobIdentHash.length);
_transport.getPumper().wantsWrite(_con, toWrite);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"prepare outbound with received=" + _received);
}
}
/**
* make sure the signatures are correct, and if they are, update the
* NTCPConnection with the session key / peer ident / clock skew / iv.
* The NTCPConnection itself is responsible for registering with the
* transport
*/
private void verifyInbound() {
if (_corrupt) return;
byte b[] = _sz_aliceIdent_tsA_padding_aliceSig.toByteArray();
//if (_log.shouldLog(Log.DEBUG))
// _log.debug(prefix()+"decrypted sz(etc) data: " + Base64.encode(b));
try {
RouterIdentity alice = new RouterIdentity();
int sz = (int)DataHelper.fromLong(b, 0, 2);
if ( (sz <= 0) || (sz > b.length-2-4-Signature.SIGNATURE_BYTES) ) {
fail("size is invalid", new Exception("size is " + sz));
return;
}
byte aliceData[] = new byte[sz];
System.arraycopy(b, 2, aliceData, 0, sz);
alice.fromByteArray(aliceData);
long tsA = DataHelper.fromLong(b, 2+sz, 4);
long diff = 1000*Math.abs(tsA-_tsB);
if (diff >= Router.CLOCK_FUDGE_FACTOR) {
fail("Clocks too skewed (" + diff + ")");
return;
} else if (_log.shouldLog(Log.DEBUG)) {
_log.debug(prefix()+"Clock skew: " + diff);
}
ByteArrayOutputStream baos = new ByteArrayOutputStream(768);
baos.write(_X);
baos.write(_Y);
baos.write(_context.routerHash().getData());
baos.write(DataHelper.toLong(4, tsA));
baos.write(DataHelper.toLong(4, _tsB));
//baos.write(b, 2+sz+4, b.length-2-sz-4-Signature.SIGNATURE_BYTES);
byte toVerify[] = baos.toByteArray();
if (_log.shouldLog(Log.DEBUG)) {
_log.debug(prefix()+"checking " + Base64.encode(toVerify, 0, 16));
//_log.debug(prefix()+"check pad " + Base64.encode(b, 2+sz+4, 12));
}
byte s[] = new byte[Signature.SIGNATURE_BYTES];
System.arraycopy(b, b.length-s.length, s, 0, s.length);
Signature sig = new Signature(s);
_verified = _context.dsa().verifySignature(sig, toVerify, alice.getSigningPublicKey());
if (_verified) {
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "verification successful for " + _con);
sendInboundConfirm(alice, tsA);
_con.setRemotePeer(alice);
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix()+"e_bobSig is " + _e_bobSig.length + " bytes long");
byte iv[] = new byte[16];
System.arraycopy(_e_bobSig, _e_bobSig.length-16, iv, 0, 16);
_con.finishInboundEstablishment(_dh.getSessionKey(), 1000*(tsA-_tsB), iv, _prevEncrypted);
if (_log.shouldLog(Log.INFO))
_log.info(prefix()+"Verified remote peer as " + alice.calculateHash().toBase64());
} else {
fail("Peer verification failed - spoof of " + alice.calculateHash().toBase64() + "?");
}
} catch (IOException ioe) {
fail("Error verifying peer", ioe);
} catch (DataFormatException dfe) {
fail("Error verifying peer", dfe);
}
}
private void sendInboundConfirm(RouterIdentity alice, long tsA) {
// send Alice E(S(X+Y+Alice.identHash+tsA+tsB), sk, prev)
byte toSign[] = new byte[256+256+32+4+4];
int off = 0;
System.arraycopy(_X, 0, toSign, off, 256); off += 256;
System.arraycopy(_Y, 0, toSign, off, 256); off += 256;
Hash h = alice.calculateHash();
System.arraycopy(h.getData(), 0, toSign, off, 32); off += 32;
DataHelper.toLong(toSign, off, 4, tsA); off += 4;
DataHelper.toLong(toSign, off, 4, _tsB); off += 4;
Signature sig = _context.dsa().sign(toSign, _context.keyManager().getSigningPrivateKey());
byte preSig[] = new byte[Signature.SIGNATURE_BYTES+8];
byte pad[] = new byte[8];
_context.random().nextBytes(pad);
System.arraycopy(sig.getData(), 0, preSig, 0, Signature.SIGNATURE_BYTES);
System.arraycopy(pad, 0, preSig, Signature.SIGNATURE_BYTES, pad.length);
_e_bobSig = new byte[preSig.length];
_context.aes().encrypt(preSig, 0, _e_bobSig, 0, _dh.getSessionKey(), _e_hXY_tsB, _e_hXY_tsB.length-16, _e_bobSig.length);
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "Sending encrypted inbound confirmation");
_transport.getPumper().wantsWrite(_con, _e_bobSig);
}
/** anything left over in the byte buffer after verification is extra */
private void prepareExtra(ByteBuffer buf) {
_extra = new byte[buf.remaining()];
buf.get(_extra);
_received += _extra.length;
if (_log.shouldLog(Log.DEBUG))
_log.debug(prefix() + "prepare extra " + _extra.length + " (total received: " + _received + ")");
}
/**
* if complete, this will contain any bytes received as part of the
* handshake that were after the actual handshake. This may return null.
*/
public byte[] getExtraBytes() { return _extra; }
private void fail(String reason) { fail(reason, null); }
private void fail(String reason, Exception e) {
_corrupt = true;
_err = reason;
_e = e;
if (_log.shouldLog(Log.WARN))
_log.warn(prefix()+"Failed to establish: " + _err, e);
}
public String getError() { return _err; }
public Exception getException() { return _e; }
private String prefix() { return toString(); }
public String toString() {
StringBuffer buf = new StringBuffer(64);
buf.append("est").append(System.identityHashCode(this));
if (_con.isInbound()) buf.append(" inbound");
else buf.append(" outbound");
if (_corrupt) buf.append(" corrupt");
if (_verified) buf.append(" verified");
if (_con.isEstablished()) buf.append(" established");
buf.append(": ");
return buf.toString();
}
/**
* a check info connection will receive 256 bytes containing:
* - 32 bytes of uninterpreted, ignored data
* - 1 byte size
* - that many bytes making up the local router's IP address (as reached by the remote side)
* - 2 byte port number that the local router was reached on
* - 4 byte i2p network time as known by the remote side (seconds since the epoch)
* - uninterpreted padding data, up to byte 223
* - xor of the local router's identity hash and the SHA256 of bytes 32 through 223
*/
private static boolean isCheckInfo(I2PAppContext ctx, Hash us, byte first256[]) {
Log log = ctx.logManager().getLog(EstablishState.class);
int off = 32; // ignore the first 32 bytes
Hash h = ctx.sha().calculateHash(first256, off, first256.length-32-off);
byte xor[] = DataHelper.xor(h.getData(), us.getData());
if (log.shouldLog(Log.DEBUG))
log.debug("check hash: " + h.toBase64() + " xor: " + Base64.encode(xor));
if (DataHelper.eq(xor, 0, first256, first256.length-32, 32)) {
// ok, data is as expected
// parse our IP/port/etc out of the first256
int ipSize = (int)DataHelper.fromLong(first256, off, 1);
off++;
byte ip[] = new byte[ipSize];
System.arraycopy(first256, off, ip, 0, ipSize);
try {
InetAddress ourIP = InetAddress.getByAddress(ip);
off += ipSize;
int port = (int)DataHelper.fromLong(first256, off, 2);
off += 2;
long now = DataHelper.fromLong(first256, off, 4);
off += 4;
long skewSeconds = (ctx.clock().now()/1000)-now;
if (log.shouldLog(Log.INFO))
log.info("Check info received: our IP: " + ourIP + " our port: " + port
+ " skew: " + skewSeconds);
} catch (UnknownHostException uhe) {
// ipSize is invalid
if (log.shouldLog(Log.WARN))
log.warn("Invalid IP received on check connection (size: " + ipSize + ")");
}
return true;
} else {
if (log.shouldLog(Log.DEBUG))
log.debug("Not a checkInfo connection");
return false;
}
}
public static void checkHost(String args[]) {
if (args.length != 3) {
System.err.println("Usage: EstablishState ipOrHostname portNum peerHashBase64");
return;
}
try {
I2PAppContext ctx = I2PAppContext.getGlobalContext();
String host = args[0];
int port = Integer.parseInt(args[1]);
byte peer[] = Base64.decode(args[2]);
Socket s = new Socket(host, port);
OutputStream out = s.getOutputStream();
byte toSend[] = new byte[256];
ctx.random().nextBytes(toSend);
int off = 32;
byte ip[] = s.getInetAddress().getAddress();
DataHelper.toLong(toSend, off, 1, ip.length);
off++;
System.arraycopy(ip, 0, toSend, off, ip.length);
off += ip.length;
DataHelper.toLong(toSend, off, 2, port);
off += 2;
long now = ctx.clock().now()/1000;
DataHelper.toLong(toSend, off, 4, now);
off += 4;
Hash h = ctx.sha().calculateHash(toSend, 32, toSend.length-32-32);
DataHelper.xor(peer, 0, h.getData(), 0, toSend, toSend.length-32, peer.length);
System.out.println("check hash: " + h.toBase64());
out.write(toSend);
out.flush();
try { Thread.sleep(1000); } catch (InterruptedException ie) {}
s.close();
} catch (Exception e) {
e.printStackTrace();
}
}
public static void main(String args[]) {
if (args.length == 3) {
checkHost(args);
return;
}
I2PAppContext ctx = I2PAppContext.getGlobalContext();
try {
java.net.Socket s = new java.net.Socket("localhost", 9094);
OutputStream out = s.getOutputStream();
DHSessionKeyBuilder dh = new DHSessionKeyBuilder();
byte X[] = dh.getMyPublicValueBytes();
// SEND X+(H(X) xor Bob.identHash)----------------------------->
out.write(X);
System.out.println("sent X =" + Base64.encode(X));
byte bih[] = Base64.decode("HuRdDx9t-RaZfYkYvacRwP~6s9mvbdkYzIMrpUCsZIo=");
System.out.println("bih = " + Base64.encode(bih));
Hash hx = ctx.sha().calculateHash(X);
System.out.println("hx = " + Base64.encode(hx.getData()));
byte hx_xor_bih[] = DataHelper.xor(bih, hx.getData());
System.out.println("xor = " + Base64.encode(hx_xor_bih));
out.write(hx_xor_bih);
out.flush();
// DONE SENDING X+(H(X) xor Bob.identHash)----------------------------->
// NOW READ Y+E(H(X+Y)+tsB+padding, sk, Y[239:255])
InputStream in = s.getInputStream();
byte toRead[] = new byte[256+(32+4+12)];
int read = 0;
while (read < toRead.length) {
int r = in.read(toRead, read, toRead.length-read);
if (r == -1)
throw new EOFException("eof at read=" + read);
read += r;
}
byte Y[] = new byte[256];
System.arraycopy(toRead, 0, Y, 0, Y.length);
dh.setPeerPublicValue(Y);
byte decrypted[] = new byte[(32+4+12)];
ctx.aes().decrypt(toRead, Y.length, decrypted, 0, dh.getSessionKey(), Y, Y.length-16, decrypted.length);
//display y, encrypted, decrypted, hx+y, tsb, padding
//unencrypted H(X+Y)+tsB+padding: bSJIv1ynFw9MhIqbObOpCqeZxtFvKEx-ilcsZQ31zYNEnVXyHCZagLbdQYRmd1oq
System.out.println("dh session key: " + dh.getSessionKey().toBase64());
System.out.println("decryption iv: " + Base64.encode(Y, Y.length-16, 16));
System.out.println("Y = " + Base64.encode(Y));
byte xy[] = new byte[512];
System.arraycopy(X, 0, xy, 0, X.length);
System.arraycopy(Y, 0, xy, X.length, Y.length);
System.out.println("h(x+y): " + ctx.sha().calculateHash(xy).toBase64());
System.out.println("encrypted H(X+Y)+tsB+padding: " + Base64.encode(toRead, Y.length, toRead.length-Y.length));
System.out.println("unencrypted H(X+Y)+tsB+padding: " + Base64.encode(decrypted));
long tsB = DataHelper.fromLong(decrypted, 32, 4);
//try { Thread.sleep(40*1000); } catch (InterruptedException ie) {}
RouterIdentity alice = new RouterIdentity();
Object k[] = ctx.keyGenerator().generatePKIKeypair();
PublicKey pub = (PublicKey)k[0];
PrivateKey priv = (PrivateKey)k[1];
k = ctx.keyGenerator().generateSigningKeypair();
SigningPublicKey spub = (SigningPublicKey)k[0];
SigningPrivateKey spriv = (SigningPrivateKey)k[1];
alice.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
alice.setPublicKey(pub);
alice.setSigningPublicKey(spub);
// SEND E(#+Alice.identity+tsA+padding+S(X+Y+Bob.identHash+tsA+tsB+padding), sk, hX_xor_Bob.identHash[16:31])--->
ByteArrayOutputStream baos = new ByteArrayOutputStream(512);
byte aliceb[] = alice.toByteArray();
long tsA = ctx.clock().now()/1000l;
baos.write(DataHelper.toLong(2, aliceb.length));
baos.write(aliceb);
baos.write(DataHelper.toLong(4, tsA));
int base = baos.size() + Signature.SIGNATURE_BYTES;
int rem = base % 16;
int padding = 0;
if (rem > 0)
padding = 16 - rem;
byte pad[] = new byte[padding];
ctx.random().nextBytes(pad);
baos.write(pad);
base += padding;
ByteArrayOutputStream sbaos = new ByteArrayOutputStream(512);
sbaos.write(X);
sbaos.write(Y);
sbaos.write(bih);
sbaos.write(DataHelper.toLong(4, tsA));
sbaos.write(DataHelper.toLong(4, tsB));
//sbaos.write(pad);
Signature sig = ctx.dsa().sign(sbaos.toByteArray(), spriv);
baos.write(sig.toByteArray());
byte unencrypted[] = baos.toByteArray();
byte toWrite[] = new byte[unencrypted.length];
System.out.println("unencrypted.length = " + unencrypted.length + " alice.size = " + aliceb.length + " padding = " + padding + " base = " + base);
ctx.aes().encrypt(unencrypted, 0, toWrite, 0, dh.getSessionKey(), hx_xor_bih, 16, unencrypted.length);
out.write(toWrite);
out.flush();
System.out.println("unencrypted: " + Base64.encode(unencrypted));
System.out.println("encrypted: " + Base64.encode(toWrite));
System.out.println("Local peer: " + alice.calculateHash().toBase64());
// now check bob's signature
SigningPublicKey bobPubKey = null;
try {
RouterInfo info = new RouterInfo();
info.readBytes(new FileInputStream("/home/jrandom/routers/router1/netDb/routerInfo-HuRdDx9t-RaZfYkYvacRwP~6s9mvbdkYzIMrpUCsZIo=.dat"));
bobPubKey = info.getIdentity().getSigningPublicKey();
} catch (Exception e) {
e.printStackTrace();
return;
}
System.out.println("Reading in bob's sig");
byte bobRead[] = new byte[48];
read = 0;
while (read < bobRead.length) {
int r = in.read(bobRead, read, bobRead.length-read);
if (r == -1)
throw new EOFException("eof at read=" + read);
read += r;
}
// bob should have sent E(S(X+Y+Alice.identHash+tsA+tsB)+padding, sk, prev)
byte preSig[] = new byte[Signature.SIGNATURE_BYTES+8];
ctx.aes().decrypt(bobRead, 0, preSig, 0, dh.getSessionKey(), toRead, toRead.length-16, preSig.length);
byte bobSigData[] = new byte[Signature.SIGNATURE_BYTES];
System.arraycopy(preSig, 0, bobSigData, 0, Signature.SIGNATURE_BYTES); // ignore the padding
System.out.println("Bob's sig: " + Base64.encode(bobSigData));
byte signed[] = new byte[256+256+32+4+4];
int off = 0;
System.arraycopy(X, 0, signed, off, 256); off += 256;
System.arraycopy(Y, 0, signed, off, 256); off += 256;
Hash h = alice.calculateHash();
System.arraycopy(h.getData(), 0, signed, off, 32); off += 32;
DataHelper.toLong(signed, off, 4, tsA); off += 4;
DataHelper.toLong(signed, off, 4, tsB); off += 4;
Signature bobSig = new Signature(bobSigData);
boolean ok = ctx.dsa().verifySignature(bobSig, signed, bobPubKey);
System.out.println("bob's sig matches? " + ok);
try { Thread.sleep(5*1000); } catch (InterruptedException ie) {}
byte fakeI2NPbuf[] = new byte[128];
ctx.random().nextBytes(fakeI2NPbuf);
out.write(fakeI2NPbuf);
out.flush();
try { Thread.sleep(30*1000); } catch (InterruptedException ie) {}
s.close();
} catch (Exception e) {
e.printStackTrace();
}
}
}
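
For orientation, a back-of-the-envelope sketch (not from the source) of the on-the-wire sizes implied by the handshake comment at the top of this file, assuming 256-byte DH values, 32-byte SHA-256 hashes, 40-byte DSA signatures, a 16-byte AES block, and a typical 387-byte router identity (256-byte public key + 128-byte signing key + 3-byte null certificate):

public class NtcpHandshakeSizes {
    public static void main(String[] args) {
        int DH = 256, HASH = 32, SIG = 40, AES = 16, IDENT = 387;
        int msg1 = DH + HASH;                          // X + (H(X) xor Bob.identHash)
        int msg2 = DH + roundUp(HASH + 4, AES);        // Y + E(H(X+Y) + tsB + padding)
        int msg3 = roundUp(2 + IDENT + 4 + SIG, AES);  // E(sz + Alice.identity + tsA + padding + sig)
        int msg4 = roundUp(SIG, AES);                  // E(sig + padding)
        System.out.println("msg1=" + msg1 + " msg2=" + msg2 + " msg3=" + msg3 + " msg4=" + msg4);
        // prints: msg1=288 msg2=304 msg3=448 msg4=48
    }
    static int roundUp(int len, int block) {
        int rem = len % block;
        return (rem == 0) ? len : len + block - rem;
    }
}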

View File

@ -0,0 +1,503 @@
package net.i2p.router.transport.ntcp;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ClosedSelectorException;
import java.nio.channels.NotYetConnectedException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import net.i2p.data.RouterIdentity;
import net.i2p.data.RouterInfo;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.FIFOBandwidthLimiter;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
/**
*
*/
public class EventPumper implements Runnable {
private RouterContext _context;
private Log _log;
private boolean _alive;
private Selector _selector;
private List _bufCache;
private List _wantsRead;
private List _wantsWrite;
private List _wantsRegister;
private List _wantsConRegister;
private NTCPTransport _transport;
private static final int BUF_SIZE = 8*1024;
private static final int MAX_CACHE_SIZE = 64;
public EventPumper(RouterContext ctx, NTCPTransport transport) {
_context = ctx;
_log = ctx.logManager().getLog(getClass());
_transport = transport;
_alive = false;
_bufCache = new ArrayList(MAX_CACHE_SIZE);
}
public void startPumping() {
if (_log.shouldLog(Log.INFO))
_log.info("Starting pumper");
_alive = true;
_wantsRead = new ArrayList(16);
_wantsWrite = new ArrayList(4);
_wantsRegister = new ArrayList(1);
_wantsConRegister = new ArrayList(4);
try {
_selector = Selector.open();
} catch (IOException ioe) {
_log.error("Error opening the selector", ioe);
}
new I2PThread(this, "NTCP Pumper", true).start();
}
public void stopPumping() {
_alive = false;
if (_selector.isOpen())
_selector.wakeup();
}
public void register(ServerSocketChannel chan) {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Registering server socket channel");
synchronized (_wantsRegister) { _wantsRegister.add(chan); }
_selector.wakeup();
}
public void registerConnect(NTCPConnection con) {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Registering outbound connection");
synchronized (_wantsConRegister) { _wantsConRegister.add(con); }
_selector.wakeup();
}
public void run() {
List bufList = new ArrayList(16);
while (_alive && _selector.isOpen()) {
try {
runDelayedEvents(bufList);
int count = 0;
try {
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("before select...");
count = _selector.select(200);
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error selecting", ioe);
}
if (count <= 0)
continue;
if (_log.shouldLog(Log.DEBUG))
_log.debug("select returned " + count);
Set selected = null;
try {
selected = _selector.selectedKeys();
} catch (ClosedSelectorException cse) {
continue;
}
for (Iterator iter = selected.iterator(); iter.hasNext(); ) {
try {
SelectionKey key = (SelectionKey)iter.next();
int ops = key.readyOps();
boolean accept = (ops & SelectionKey.OP_ACCEPT) != 0;
boolean connect = (ops & SelectionKey.OP_CONNECT) != 0;
boolean read = (ops & SelectionKey.OP_READ) != 0;
boolean write = (ops & SelectionKey.OP_WRITE) != 0;
if (_log.shouldLog(Log.DEBUG))
_log.debug("ready ops for : " + key
+ " accept? " + accept + " connect? " + connect
+ " read? " + read
+ "/" + ((key.interestOps()&SelectionKey.OP_READ)!= 0)
+ " write? " + write
+ "/" + ((key.interestOps()&SelectionKey.OP_WRITE)!= 0)
);
if (accept) {
processAccept(key);
}
if (connect) {
key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT);
processConnect(key);
}
if (read) {
key.interestOps(key.interestOps() & ~SelectionKey.OP_READ);
processRead(key);
}
if (write) {
key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE);
processWrite(key);
}
} catch (CancelledKeyException cke) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("key cancelled");
}
}
selected.clear();
} catch (RuntimeException re) {
_log.log(Log.CRIT, "Error in the event pumper", re);
}
}
try {
if (_selector.isOpen()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Closing down the event pumper with selection keys remaining");
Set keys = _selector.keys();
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
SelectionKey key = (SelectionKey)iter.next();
try {
Object att = key.attachment();
if (att instanceof ServerSocketChannel) {
ServerSocketChannel chan = (ServerSocketChannel)att;
chan.close();
key.cancel();
} else if (att instanceof NTCPConnection) {
NTCPConnection con = (NTCPConnection)att;
con.close();
key.cancel();
}
} catch (Exception ke) {
_log.error("Error closing key " + key + " on pumper shutdown", ke);
}
}
_selector.close();
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Closing down the event pumper with no selection keys remaining");
}
} catch (Exception e) {
_log.error("Error closing keys on pumper shutdown", e);
}
synchronized (_wantsConRegister) { _wantsConRegister.clear(); }
synchronized (_wantsRead) { _wantsRead.clear(); }
synchronized (_wantsRegister) { _wantsRegister.clear(); }
synchronized (_wantsWrite) { _wantsWrite.clear(); }
}
public void wantsWrite(NTCPConnection con, byte data[]) {
ByteBuffer buf = ByteBuffer.wrap(data);
FIFOBandwidthLimiter.Request req = _context.bandwidthLimiter().requestOutbound(data.length, "NTCP write", null, null);//con, buf);
if (req.getPendingOutboundRequested() > 0) {
if (_log.shouldLog(Log.INFO))
_log.info("queued write on " + con + " for " + data.length);
con.queuedWrite(buf, req);
} else {
// fully allocated
if (_log.shouldLog(Log.INFO))
_log.info("fully allocated write on " + con + " for " + data.length);
con.write(buf);
}
}
/** called by the connection when it has data ready to write (after bw allocation) */
public void wantsWrite(NTCPConnection con) {
if (_log.shouldLog(Log.INFO))
_log.info("Before adding wants to write on " + con);
synchronized (_wantsWrite) {
if (!_wantsWrite.contains(con))
_wantsWrite.add(con);
}
if (_log.shouldLog(Log.INFO))
_log.info("Wants to write on " + con);
_selector.wakeup();
if (_log.shouldLog(Log.DEBUG))
_log.debug("selector awoken for write");
}
public void wantsRead(NTCPConnection con) {
synchronized (_wantsRead) {
if (!_wantsRead.contains(con))
_wantsRead.add(con);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("wants to read on " + con);
_selector.wakeup();
if (_log.shouldLog(Log.DEBUG))
_log.debug("selector awoken for read");
}
private static final int MIN_BUFS = 5;
private static int NUM_BUFS = 5;
private static int __liveBufs = 0;
private static int __consecutiveExtra;
ByteBuffer acquireBuf() {
if (false) return ByteBuffer.allocate(BUF_SIZE);
ByteBuffer rv = null;
synchronized (_bufCache) {
if (_bufCache.size() > 0)
rv = (ByteBuffer)_bufCache.remove(0);
}
if (rv == null) {
rv = ByteBuffer.allocate(BUF_SIZE);
NUM_BUFS = ++__liveBufs;
if (_log.shouldLog(Log.DEBUG))
_log.debug("creating a new read buffer " + System.identityHashCode(rv) + " with " + __liveBufs + " live: " + rv);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("acquiring existing read buffer " + System.identityHashCode(rv) + " with " + __liveBufs + " live: " + rv);
}
return rv;
}
void releaseBuf(ByteBuffer buf) {
if (false) return;
if (_log.shouldLog(Log.DEBUG))
_log.debug("releasing read buffer " + System.identityHashCode(buf) + " with " + __liveBufs + " live: " + buf);
buf.clear();
int extra = 0;
boolean cached = false;
synchronized (_bufCache) {
if (_bufCache.size() < NUM_BUFS) {
extra = _bufCache.size();
_bufCache.add(buf);
cached = true;
if (extra > 5) {
__consecutiveExtra++;
if (__consecutiveExtra >= 20) {
NUM_BUFS = Math.max(NUM_BUFS - 1, MIN_BUFS);
__consecutiveExtra = 0;
}
}
} else {
__liveBufs--;
}
}
if (cached && _log.shouldLog(Log.DEBUG))
_log.debug("read buffer " + System.identityHashCode(buf) + " cached with " + __liveBufs + " live");
}
private void processAccept(SelectionKey key) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("processing accept");
ServerSocketChannel servChan = (ServerSocketChannel)key.attachment();
try {
SocketChannel chan = servChan.accept();
chan.configureBlocking(false);
SelectionKey ckey = chan.register(_selector, SelectionKey.OP_READ);
NTCPConnection con = new NTCPConnection(_context, _transport, chan, ckey);
if (_log.shouldLog(Log.DEBUG))
_log.debug("new NTCP connection established: " +con);
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR)) _log.error("Error accepting", ioe);
}
}
private void processConnect(SelectionKey key) {
NTCPConnection con = (NTCPConnection)key.attachment();
try {
SocketChannel chan = con.getChannel();
boolean connected = chan.finishConnect();
if (_log.shouldLog(Log.DEBUG))
_log.debug("processing connect for " + key + " / " + con + ": connected? " + connected);
if (connected) {
con.setKey(key);
con.outboundConnected();
} else {
con.close();
}
} catch (IOException ioe) {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Error processing connection", ioe);
}
}
private void processRead(SelectionKey key) {
NTCPConnection con = (NTCPConnection)key.attachment();
ByteBuffer buf = acquireBuf();
try {
int read = con.getChannel().read(buf);
if (read == -1) {
if (_log.shouldLog(Log.DEBUG)) _log.debug("EOF on " + con);
con.close();
releaseBuf(buf);
} else if (read == 0) {
key.interestOps(key.interestOps() | SelectionKey.OP_READ);
releaseBuf(buf);
} else if (read > 0) {
byte data[] = new byte[read];
buf.flip();
buf.get(data);
releaseBuf(buf);
ByteBuffer rbuf = ByteBuffer.wrap(data);
FIFOBandwidthLimiter.Request req = _context.bandwidthLimiter().requestInbound(read, "NTCP read", null, null); //con, buf);
if (req.getPendingInboundRequested() > 0) {
key.interestOps(key.interestOps() & ~SelectionKey.OP_READ);
con.queuedRecv(rbuf, req);
} else {
// fully allocated
key.interestOps(key.interestOps() | SelectionKey.OP_READ);
con.recv(rbuf);
}
}
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN)) _log.warn("error reading", ioe);
con.close();
if (buf != null) releaseBuf(buf);
} catch (NotYetConnectedException nyce) {
// the channel hasn't finished connecting yet, so there is nothing to read; ignore
}
}
private void processWrite(SelectionKey key) {
int totalWritten = 0;
int buffers = 0;
long before = System.currentTimeMillis();
NTCPConnection con = (NTCPConnection)key.attachment();
try {
while (true) {
ByteBuffer buf = con.getNextWriteBuf();
if ( (buf != null) && (buf.remaining() > 0) ) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("writing " + buf.remaining()+"...");
int written = con.getChannel().write(buf);
totalWritten += written;
if (written == 0) {
if ( (buf.remaining() > 0) || (con.getWriteBufCount() > 1) ) {
if (_log.shouldLog(Log.DEBUG)) _log.debug("done writing, but data remains...");
key.interestOps(key.interestOps() | SelectionKey.OP_WRITE);
} else {
if (_log.shouldLog(Log.DEBUG)) _log.debug("done writing, no data remains...");
}
break;
} else if (buf.remaining() > 0) {
if (_log.shouldLog(Log.DEBUG)) _log.debug("buffer data remaining...");
key.interestOps(key.interestOps() | SelectionKey.OP_WRITE);
break;
} else {
long beforeRem = System.currentTimeMillis();
con.removeWriteBuf(buf);
long afterRem = System.currentTimeMillis();
if (_log.shouldLog(Log.DEBUG))
_log.debug("buffer "+ buffers+"/"+written+"/"+totalWritten+" fully written after " +
(beforeRem-before) + ", then removed after " + (afterRem-beforeRem) + "...");
//releaseBuf(buf);
buf = null;
buffers++;
//if (buffer time is too much, add OP_WRITE to the interest ops and break?)
}
} else {
break;
}
}
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN)) _log.warn("error writing", ioe);
con.close();
}
long after = System.currentTimeMillis();
if (_log.shouldLog(Log.INFO))
_log.info("Wrote " + totalWritten + " in " + buffers + " buffers on " + con
+ " after " + (after-before));
}
private void runDelayedEvents(List buf) {
synchronized (_wantsRead) {
if (_wantsRead.size() > 0) {
buf.addAll(_wantsRead);
_wantsRead.clear();
}
}
while (buf.size() > 0) {
NTCPConnection con = (NTCPConnection)buf.remove(0);
SelectionKey key = con.getKey();
key.interestOps(key.interestOps() | SelectionKey.OP_READ);
}
synchronized (_wantsWrite) {
if (_wantsWrite.size() > 0) {
buf.addAll(_wantsWrite);
_wantsWrite.clear();
}
}
while (buf.size() > 0) {
NTCPConnection con = (NTCPConnection)buf.remove(0);
SelectionKey key = con.getKey();
try {
key.interestOps(key.interestOps() | SelectionKey.OP_WRITE);
} catch (CancelledKeyException cke) {
// ignore
}
}
synchronized (_wantsRegister) {
if (_wantsRegister.size() > 0) {
buf.addAll(_wantsRegister);
_wantsRegister.clear();
}
}
while (buf.size() > 0) {
ServerSocketChannel chan = (ServerSocketChannel)buf.remove(0);
try {
SelectionKey key = chan.register(_selector, SelectionKey.OP_ACCEPT);
key.attach(chan);
} catch (ClosedChannelException cce) {
if (_log.shouldLog(Log.WARN)) _log.warn("Error registering", cce);
}
}
synchronized (_wantsConRegister) {
if (_wantsConRegister.size() > 0) {
buf.addAll(_wantsConRegister);
_wantsConRegister.clear();
}
}
while (buf.size() > 0) {
NTCPConnection con = (NTCPConnection)buf.remove(0);
try {
SelectionKey key = con.getChannel().register(_selector, SelectionKey.OP_CONNECT);
key.attach(con);
con.setKey(key);
try {
NTCPAddress naddr = con.getRemoteAddress();
InetSocketAddress saddr = new InetSocketAddress(naddr.getHost(), naddr.getPort());
boolean connected = con.getChannel().connect(saddr);
if (connected) {
key.interestOps(SelectionKey.OP_READ);
processConnect(key);
}
} catch (IOException ioe) {
if (_log.shouldLog(Log.WARN)) _log.warn("error connecting", ioe);
if (ntcpOnly(con)) {
_context.shitlist().shitlistRouter(con.getRemotePeer().calculateHash(), "unable to connect: " + ioe.getMessage());
con.close(false);
} else {
_context.shitlist().shitlistRouter(con.getRemotePeer().calculateHash(), NTCPTransport.STYLE, "unable to connect: " + ioe.getMessage());
con.close(true);
}
}
} catch (ClosedChannelException cce) {
if (_log.shouldLog(Log.WARN)) _log.warn("Error registering", cce);
}
}
long now = System.currentTimeMillis();
if (_lastExpired + 1000 <= now) {
expireTimedOut();
_lastExpired = now;
}
}
/**
* If the other peer only supports ntcp, we should shitlist them when we can't reach 'em,
* but if they support other transports (eg ssu) we should allow those transports to be
* tried as well.
*/
private boolean ntcpOnly(NTCPConnection con) {
RouterIdentity ident = con.getRemotePeer();
if (ident == null) return true;
RouterInfo info = _context.netDb().lookupRouterInfoLocally(ident.calculateHash());
if (info == null) return true;
return info.getAddresses().size() == 1;
}
private long _lastExpired;
private void expireTimedOut() {
_transport.expireTimedOut();
}
}
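
One detail of the pumper worth calling out: other threads never touch a SelectionKey's interest ops directly. They append to the _wantsRead/_wantsWrite/_wantsRegister/_wantsConRegister lists, and the selector thread applies everything in runDelayedEvents() between passes. The sketch below shows that idiom in isolation; the class and method names are illustrative only (not part of this commit), and the wakeup() call is an assumption since the pumper's own select loop is outside this excerpt.

import java.nio.channels.CancelledKeyException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

/** Illustrative only: queue interest-op requests so that only the selector thread touches the keys. */
class InterestOpQueue {
    private final Selector _selector;
    private final List _wantsRead = new ArrayList(16);

    public InterestOpQueue(Selector selector) { _selector = selector; }

    /** may be called from any thread, e.g. once a reader has drained a connection's buffers */
    public void requestRead(SelectionKey key) {
        synchronized (_wantsRead) { _wantsRead.add(key); }
        _selector.wakeup(); // assumption: something must interrupt select() so the request is seen promptly
    }

    /** called only by the selector thread, once per pass, before select() */
    public void applyPending() {
        List buf = new ArrayList();
        synchronized (_wantsRead) {
            buf.addAll(_wantsRead);
            _wantsRead.clear();
        }
        for (Iterator iter = buf.iterator(); iter.hasNext(); ) {
            SelectionKey key = (SelectionKey)iter.next();
            try {
                key.interestOps(key.interestOps() | SelectionKey.OP_READ);
            } catch (CancelledKeyException cke) {
                // the connection was closed out from under us; nothing to do
            }
        }
    }
}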

View File

@ -0,0 +1,161 @@
package net.i2p.router.transport.ntcp;
/*
* free (adj.): unencumbered; not under the control of others
* Written by jrandom in 2003 and released into the public domain
* with no warranty of any kind, either expressed or implied.
* It probably won't make your computer catch on fire, or eat
* your children, but it might. Use at your own risk.
*
*/
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Properties;
import net.i2p.data.DataHelper;
import net.i2p.data.RouterAddress;
import net.i2p.router.transport.TransportImpl;
import net.i2p.util.Log;
/**
* Wrap up an address
*/
public class NTCPAddress {
private final static Log _log = new Log(NTCPAddress.class);
private int _port;
private String _host;
//private InetAddress _addr;
/** Port number used in RouterAddress definitions */
public final static String PROP_PORT = "port";
/** Host name used in RouterAddress definitions */
public final static String PROP_HOST = "host";
public NTCPAddress(String host, int port) {
if (host != null)
_host = host.trim();
_port = port;
}
public NTCPAddress() {
_host = null;
_port = -1;
// _addr = null;
}
/*
public NTCPAddress(InetAddress addr, int port) {
if (addr != null)
_host = addr.getHostAddress();
_addr = addr;
_port = port;
}
*/
public NTCPAddress(RouterAddress addr) {
if (addr == null) {
_host = null;
_port = -1;
return;
}
String host = addr.getOptions().getProperty(PROP_HOST);
if (host == null) {
_host = null;
_port = -1;
} else {
_host = host.trim();
String port = addr.getOptions().getProperty(PROP_PORT);
if ( (port != null) && (port.trim().length() > 0) ) {
try {
_port = Integer.parseInt(port.trim());
} catch (NumberFormatException nfe) {
_log.error("Invalid port [" + port + "]", nfe);
_port = -1;
}
} else {
_port = -1;
}
}
}
public RouterAddress toRouterAddress() {
if ( (_host == null) || (_port <= 0) )
return null;
RouterAddress addr = new RouterAddress();
addr.setCost(10);
addr.setExpiration(null);
Properties props = new Properties();
props.setProperty(PROP_HOST, _host);
props.setProperty(PROP_PORT, ""+_port);
addr.setOptions(props);
addr.setTransportStyle(NTCPTransport.STYLE);
return addr;
}
public String getHost() { return _host; }
public void setHost(String host) { _host = host; }
//public InetAddress getAddress() { return _addr; }
//public void setAddress(InetAddress addr) { _addr = addr; }
public int getPort() { return _port; }
public void setPort(int port) { _port = port; }
public boolean isPubliclyRoutable() {
return isPubliclyRoutable(_host);
}
public static boolean isPubliclyRoutable(String host) {
if (host == null) return false;
try {
InetAddress addr = InetAddress.getByName(host);
byte quad[] = addr.getAddress();
// allow ipv6 for ntcpaddress, since we've still got ssu
//if (quad.length != 4) {
// if (_log.shouldLog(Log.ERROR))
// _log.error("Refusing IPv6 address (" + host + " / " + addr.getHostAddress() + ") "
// + " since not all peers support it, and we don't support restricted routes");
// return false;
//}
return TransportImpl.isPubliclyRoutable(quad);
} catch (Throwable t) {
if (_log.shouldLog(Log.WARN))
_log.warn("Error checking routability", t);
return false;
}
}
public String toString() { return _host + ":" + _port; }
public int hashCode() {
int rv = 0;
rv += _port;
//if (_addr != null)
// rv += _addr.getHostAddress().hashCode();
//else
if (_host != null) rv += _host.trim().hashCode();
return rv;
}
public boolean equals(Object val) {
if ( (val != null) && (val instanceof NTCPAddress) ) {
NTCPAddress addr = (NTCPAddress)val;
String hostname = null;
if (addr.getHost() != null)
hostname = addr.getHost().trim();
String ourHost = getHost();
if (ourHost != null)
ourHost = ourHost.trim();
return DataHelper.eq(hostname, ourHost) && getPort() == addr.getPort();
}
return false;
}
public boolean equals(RouterAddress addr) {
if (addr == null) return false;
Properties opts = addr.getOptions();
if (opts == null) return false;
return ( (_host.equals(opts.getProperty(PROP_HOST))) &&
(Integer.toString(_port).equals(opts.getProperty(PROP_PORT))) );
}
}
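
NTCPAddress is the glue between a configured hostname/port and the RouterAddress published in the netDb, so a short usage sketch may help. It is an illustrative fragment only, uses nothing beyond the constructors and accessors defined above, and the hostname and port are made-up example values.

// Illustrative fragment only (assumes this file's imports): round-trip a configured
// host/port through the RouterAddress form that gets published in the netDb.
NTCPAddress mine = new NTCPAddress("ntcp.example.net", 9091);   // made-up example values
if (mine.isPubliclyRoutable()) {                                // false for unresolvable or private hosts
    RouterAddress published = mine.toRouterAddress();           // cost=10, style=NTCP, host/port options
    NTCPAddress parsed = new NTCPAddress(published);            // what a remote router would reconstruct
    System.out.println("reachable at " + parsed.getHost() + ":" + parsed.getPort());
}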

File diff suppressed because it is too large

View File

@ -0,0 +1,486 @@
package net.i2p.router.transport.ntcp;
import java.io.IOException;
import java.io.Writer;
import java.net.InetSocketAddress;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
import net.i2p.data.RouterAddress;
import net.i2p.data.RouterIdentity;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.*;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer;
/**
*
*/
public class NTCPTransport extends TransportImpl {
private Log _log;
private SharedBid _fastBid;
private SharedBid _slowBid;
private Object _conLock;
private Map _conByIdent;
private NTCPAddress _myAddress;
private EventPumper _pumper;
private Reader _reader;
private net.i2p.router.transport.ntcp.Writer _writer;
/**
* list of NTCPConnections that are not yet established, which we want to
* remove on establishment or close on timeout
*/
private List _establishing;
private List _sent;
private SendFinisher _finisher;
public NTCPTransport(RouterContext ctx) {
super(ctx);
_log = ctx.logManager().getLog(getClass());
_context.statManager().createRateStat("ntcp.sendTime", "Total message lifetime when sent completely", "ntcp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("ntcp.transmitTime", "How long after message preparation before the message was fully sent", "ntcp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("ntcp.sendQueueSize", "How many messages were ahead of the current one on the connection's queue when it was first added", "ntcp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("ntcp.receiveTime", "How long it takes to receive an inbound message", "ntcp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("ntcp.receiveSize", "How large the received message was", "ntcp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("ntcp.sendBacklogTime", "How long the head of the send queue has been waiting when we fail to add a new one to the queue (period is the number of messages queued)", "ntcp", new long[] { 60*1000, 10*60*1000 });
_establishing = new ArrayList(4);
_conLock = new Object();
_conByIdent = new HashMap(64);
_sent = new ArrayList(4);
_finisher = new SendFinisher();
_pumper = new EventPumper(ctx, this);
_reader = new Reader(ctx);
_writer = new net.i2p.router.transport.ntcp.Writer(ctx);
_fastBid = new SharedBid(25); // best
_slowBid = new SharedBid(70); // better than ssu unestablished, but not better than ssu established
}
void inboundEstablished(NTCPConnection con) {
_context.shitlist().unshitlistRouter(con.getRemotePeer().calculateHash());
NTCPConnection old = null;
synchronized (_conLock) {
old = (NTCPConnection)_conByIdent.put(con.getRemotePeer().calculateHash(), con);
}
if (old != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Old connection closed: " + old + " replaced by " + con);
old.close();
}
}
protected void outboundMessageReady() {
OutNetMessage msg = getNextMessage();
if (msg != null) {
RouterIdentity ident = msg.getTarget().getIdentity();
Hash ih = ident.calculateHash();
NTCPConnection con = null;
boolean isNew = false;
synchronized (_conLock) {
con = (NTCPConnection)_conByIdent.get(ih);
if (con == null) {
isNew = true;
RouterAddress addr = msg.getTarget().getTargetAddress(STYLE);
if (addr != null) {
NTCPAddress naddr = new NTCPAddress(addr);
con = new NTCPConnection(_context, this, ident, naddr);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send on a new con: " + con + " at " + addr + " for " + ih.toBase64());
_conByIdent.put(ih, con);
} else {
_log.error("we bid on a peer who doesn't have an ntcp address? " + msg.getTarget());
return;
}
}
}
if (isNew) {
con.enqueueInfoMessage(); // enqueues a netDb store of our own info
con.send(msg); // doesn't do anything yet, just enqueues it
try {
SocketChannel channel = SocketChannel.open();
con.setChannel(channel);
channel.configureBlocking(false);
_pumper.registerConnect(con);
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error opening a channel", ioe);
con.close();
}
} else {
con.send(msg);
}
/*
NTCPConnection con = getCon(ident);
remove the race here
if (con != null) {
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("Send on an existing con: " + con);
con.send(msg);
} else {
RouterAddress addr = msg.getTarget().getTargetAddress(STYLE);
if (addr != null) {
NTCPAddress naddr = new NTCPAddress(addr);
con = new NTCPConnection(_context, this, ident, naddr);
Hash ih = ident.calculateHash();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send on a new con: " + con + " at " + addr + " for " + ih.toBase64());
NTCPConnection old = null;
synchronized (_conLock) {
old = (NTCPConnection)_conByIdent.put(ih, con);
}
if (old != null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Multiple connections on out ready, closing " + old + " and keeping " + con);
old.close();
}
con.enqueueInfoMessage(); // enqueues a netDb store of our own info
con.send(msg); // doesn't do anything yet, just enqueues it
try {
SocketChannel channel = SocketChannel.open();
con.setChannel(channel);
channel.configureBlocking(false);
_pumper.registerConnect(con);
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error opening a channel", ioe);
con.close();
}
} else {
con.close();
}
}
*/
}
}
public void afterSend(OutNetMessage msg, boolean sendSuccessful, boolean allowRequeue, long msToSend) {
super.afterSend(msg, sendSuccessful, allowRequeue, msToSend);
}
public TransportBid bid(RouterInfo toAddress, long dataSize) {
if (_context.shitlist().isShitlisted(toAddress.getIdentity().calculateHash(), STYLE)) {
// we aren't shitlisted in general (since we are trying to get a bid), but we have
// recently shitlisted the peer on the NTCP transport, so don't try it
return null;
}
RouterAddress addr = toAddress.getTargetAddress(STYLE);
//if ( (_myAddress != null) && (_myAddress.equals(addr)) )
// return null; // dont talk to yourself
boolean established = isEstablished(toAddress.getIdentity());
if (established) // should we check the queue size? nah, if it's valid, use it
return _fastBid;
else if (addr != null)
return _slowBid;
else
return null;
}
void sendComplete(OutNetMessage msg) { _finisher.add(msg); }
/** async afterSend call, which can take some time w/ jobs, etc */
private class SendFinisher implements SimpleTimer.TimedEvent {
public void add(OutNetMessage msg) {
synchronized (_sent) { _sent.add(msg); }
SimpleTimer.getInstance().addEvent(SendFinisher.this, 0);
}
public void timeReached() {
int pending = 0;
OutNetMessage msg = null;
synchronized (_sent) {
pending = _sent.size()-1;
if (pending >= 0)
msg = (OutNetMessage)_sent.remove(0);
}
if (msg != null)
afterSend(msg, true, false, msg.getSendTime());
if (pending > 0)
SimpleTimer.getInstance().addEvent(SendFinisher.this, 0);
}
}
private boolean isEstablished(RouterIdentity peer) {
synchronized (_conLock) {
NTCPConnection con = (NTCPConnection)_conByIdent.get(peer.calculateHash());
return (con != null) && con.isEstablished() && !con.isClosed();
}
}
void removeCon(NTCPConnection con) {
NTCPConnection removed = null;
synchronized (_conLock) {
RouterIdentity ident = con.getRemotePeer();
if (ident != null)
removed = (NTCPConnection)_conByIdent.remove(ident.calculateHash());
}
if ( (removed != null) && (removed != con) ) {// multiple cons, close 'em both
if (_log.shouldLog(Log.ERROR))
_log.error("Multiple connections on remove, closing " + removed + " (already closed " + con + ")");
removed.close();
}
}
/**
* How many peers can we talk to right now?
*
*/
public int countActivePeers() { synchronized (_conLock) { return _conByIdent.size(); } }
/**
* How many peers are we actively sending messages to (this minute)
*/
public int countActiveSendPeers() {
int active = 0;
synchronized (_conLock) {
for (Iterator iter = _conByIdent.values().iterator(); iter.hasNext(); ) {
NTCPConnection con = (NTCPConnection)iter.next();
if (con.getTimeSinceSend() <= 60*1000)
active++;
}
}
return active;
}
private static final int NUM_CONCURRENT_READERS = 3;
private static final int NUM_CONCURRENT_WRITERS = 3;
public RouterAddress startListening() {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Starting ntcp transport listening");
_pumper.startPumping();
_reader.startReading(NUM_CONCURRENT_READERS);
_writer.startWriting(NUM_CONCURRENT_WRITERS);
configureLocalAddress();
if (_myAddress != null) {
try {
ServerSocketChannel chan = ServerSocketChannel.open();
chan.configureBlocking(false);
InetSocketAddress addr = null;
//if (bindAllInterfaces())
addr = new InetSocketAddress(_myAddress.getPort());
//else
// addr = new InetSocketAddress(_myAddress.getAddress(), _myAddress.getPort());
chan.socket().bind(addr);
if (_log.shouldLog(Log.INFO))
_log.info("Listening on " + addr);
_pumper.register(chan);
} catch (IOException ioe) {
_log.error("Error listening", ioe);
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Outbound NTCP connections only - no listener configured");
}
if (_myAddress != null) {
RouterAddress rv = _myAddress.toRouterAddress();
if (rv != null)
replaceAddress(rv);
return rv;
} else {
return null;
}
}
Reader getReader() { return _reader; }
net.i2p.router.transport.ntcp.Writer getWriter() { return _writer; }
public String getStyle() { return STYLE; }
EventPumper getPumper() { return _pumper; }
/**
* how long from initial connection attempt (accept() or connect()) until
* the con must be established to avoid premature close()ing
*/
private static final int ESTABLISH_TIMEOUT = 10*1000;
/** add us to the establishment timeout process */
void establishing(NTCPConnection con) {
synchronized (_establishing) {
_establishing.add(con);
}
}
/**
* called in the EventPumper no more than once a second or so, closing
* any unconnected/unestablished connections
*/
void expireTimedOut() {
List expired = null;
synchronized (_establishing) {
for (int i = 0; i < _establishing.size(); i++) {
NTCPConnection con = (NTCPConnection)_establishing.get(i);
if (con.isClosed()) {
_establishing.remove(i);
i--;
} else if (con.isEstablished()) {
_establishing.remove(i);
i--;
} else if (con.getTimeSinceCreated() > ESTABLISH_TIMEOUT) {
_establishing.remove(i);
i--;
if (expired == null)
expired = new ArrayList(2);
expired.add(con);
}
}
}
for (int i = 0; expired != null && i < expired.size(); i++)
((NTCPConnection)expired.get(i)).close();
}
//private boolean bindAllInterfaces() { return true; }
private void configureLocalAddress() {
RouterContext ctx = getContext();
if (ctx == null) {
System.err.println("NIO transport has no context?");
} else {
RouterAddress ra = CommSystemFacadeImpl.createNTCPAddress(ctx);
if (ra != null) {
_myAddress = new NTCPAddress(ra);
replaceAddress(ra);
if (_log.shouldLog(Log.INFO))
_log.info("NTCP address configured: " + _myAddress);
} else {
if (_log.shouldLog(Log.INFO))
_log.info("NTCP address is outbound only");
}
}
}
public void stopListening() {
if (_log.shouldLog(Log.DEBUG)) _log.debug("Stopping ntcp transport");
_pumper.stopPumping();
_writer.stopWriting();
_reader.stopReading();
Map cons = null;
synchronized (_conLock) {
cons = new HashMap(_conByIdent);
_conByIdent.clear();
}
for (Iterator iter = cons.keySet().iterator(); iter.hasNext(); ) {
NTCPConnection con = (NTCPConnection)iter.next();
con.close();
}
}
public static final String STYLE = "NTCP";
public void renderStatusHTML(java.io.Writer out, int sortFlags) throws IOException {}
public void renderStatusHTML(java.io.Writer out, String urlBase, int sortFlags) throws IOException {
TreeSet peers = new TreeSet(getComparator(sortFlags));
synchronized (_conLock) {
peers.addAll(_conByIdent.values());
}
long offsetTotal = 0;
int bpsIn = 0;
int bpsOut = 0;
long uptimeMsTotal = 0;
long sendTotal = 0;
long recvTotal = 0;
int numPeers = 0;
StringBuffer buf = new StringBuffer(512);
buf.append("<b id=\"ntcpcon\">NTCP connections: ").append(peers.size()).append("</b><br />\n");
buf.append("<table border=\"1\">\n");
buf.append(" <tr><td><b><a href=\"#def.peer\">peer</a></b></td>");
buf.append(" <td><b><a href=\"#def.peer\">uptime</a></b></td>");
buf.append(" <td><b><a href=\"#def.peer\">idle</a></b></td>");
buf.append(" <td><b><a href=\"#def.peer\">sent</a></b></td>");
buf.append(" <td><b><a href=\"#def.peer\">received</a></b></td>");
buf.append(" <td><b><a href=\"#def.peer\">out/in</a></b></td>");
buf.append(" <td><b><a href=\"#def.peer\">out queue</a></b></td>");
buf.append(" <td><b><a href=\"#def.peer\">skew</a></b></td>");
buf.append(" </tr>\n");
out.write(buf.toString());
buf.setLength(0);
for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
NTCPConnection con = (NTCPConnection)iter.next();
buf.append("<tr><td>").append(con.getRemotePeer().calculateHash().toBase64().substring(0,8));
buf.append("</td><td>").append(DataHelper.formatDuration(con.getUptime()));
buf.append("</td><td>").append(DataHelper.formatDuration(con.getTimeSinceSend()));
buf.append("/").append(DataHelper.formatDuration(con.getTimeSinceReceive()));
buf.append("</td><td>").append(con.getMessagesSent());
buf.append("</td><td>").append(con.getMessagesReceived());
buf.append("</td><td>").append(formatRate(con.getSendRate()/1024));
buf.append("/").append(formatRate(con.getRecvRate()/1024)).append("KBps");
buf.append("</td><td>").append(con.getOutboundQueueSize());
buf.append("</td><td>").append(DataHelper.formatDuration(con.getClockSkew()));
buf.append("</td></tr>\n");
out.write(buf.toString());
buf.setLength(0);
}
buf.append("</table>\n");
out.write(buf.toString());
buf.setLength(0);
}
private static NumberFormat _rateFmt = new DecimalFormat("#,#00.00");
private static String formatRate(float rate) {
synchronized (_rateFmt) { return _rateFmt.format(rate); }
}
private Comparator getComparator(int sortFlags) {
Comparator rv = null;
switch (Math.abs(sortFlags)) {
default:
rv = AlphaComparator.instance();
}
if (sortFlags < 0)
rv = new InverseComparator(rv);
return rv;
}
private static class AlphaComparator extends PeerComparator {
private static final AlphaComparator _instance = new AlphaComparator();
public static final AlphaComparator instance() { return _instance; }
}
private static class InverseComparator implements Comparator {
private Comparator _comp;
public InverseComparator(Comparator comp) { _comp = comp; }
public int compare(Object lhs, Object rhs) {
return -1 * _comp.compare(lhs, rhs);
}
}
private static class PeerComparator implements Comparator {
public int compare(Object lhs, Object rhs) {
if ( (lhs == null) || (rhs == null) || !(lhs instanceof NTCPConnection) || !(rhs instanceof NTCPConnection))
throw new IllegalArgumentException("rhs = " + rhs + " lhs = " + lhs);
return compare((NTCPConnection)lhs, (NTCPConnection)rhs);
}
protected int compare(NTCPConnection l, NTCPConnection r) {
// base64 retains binary ordering
return DataHelper.compareTo(l.getRemotePeer().calculateHash().getData(), r.getRemotePeer().calculateHash().getData());
}
}
/**
* Cache the bid to reduce object churn
*/
private class SharedBid extends TransportBid {
public SharedBid(int ms) { super(); setLatencyMs(ms); }
public Transport getTransport() { return NTCPTransport.this; }
public String toString() { return "NTCP bid @ " + getLatencyMs(); }
}
}
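
The bid values above encode the transport-selection policy: an established NTCP connection bids 25, an unestablished but addressable peer bids 70 (between SSU's established and unestablished bids, per the comments), and a peer with no NTCP address gets no bid at all. How the comm system compares bids is not part of this excerpt, so the helper below is only a guess at that comparison, assuming nothing beyond the getLatencyMs()/getTransport() accessors already used above.

// Hypothetical selection helper (not part of this commit): given one bid per transport,
// pick the transport claiming the lowest latency. Only getLatencyMs()/getTransport(),
// both used above, are assumed to exist on TransportBid.
private Transport pickLowestLatency(List bids) {
    TransportBid best = null;
    for (Iterator iter = bids.iterator(); iter.hasNext(); ) {
        TransportBid bid = (TransportBid)iter.next();
        if (bid == null) continue;               // a null bid means the transport declined
        if ( (best == null) || (bid.getLatencyMs() < best.getLatencyMs()) )
            best = bid;
    }
    // e.g. an established NTCP bid (25) beats an unestablished SSU bid for the same peer
    return (best != null ? best.getTransport() : null);
}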

View File

@ -0,0 +1,165 @@
package net.i2p.router.transport.ntcp;
import java.nio.ByteBuffer;
import java.util.*;
import net.i2p.router.RouterContext;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
/**
* Pool of running threads which will process any read bytes on any of the
* NTCPConnections, including the decryption of the data read, connection
* handshaking, parsing bytes into I2NP messages, etc.
*
*/
class Reader {
private RouterContext _context;
private Log _log;
private List _pendingConnections;
private List _liveReads;
private List _readAfterLive;
private List _runners;
public Reader(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(getClass());
_pendingConnections = new ArrayList(16);
_runners = new ArrayList(5);
_liveReads = new ArrayList(5);
_readAfterLive = new ArrayList();
}
public void startReading(int numReaders) {
for (int i = 0; i < numReaders; i++) {
Runner r = new Runner();
I2PThread t = new I2PThread(r, "NTCP read " + i, true);
_runners.add(r);
t.start();
}
}
public void stopReading() {
while (_runners.size() > 0) {
Runner r = (Runner)_runners.remove(0);
r.stop();
}
synchronized (_pendingConnections) {
_readAfterLive.clear();
_pendingConnections.notifyAll();
}
}
public void wantsRead(NTCPConnection con) {
boolean already = false;
synchronized (_pendingConnections) {
if (_liveReads.contains(con)) {
if (!_readAfterLive.contains(con)) {
_readAfterLive.add(con);
}
already = true;
} else if (!_pendingConnections.contains(con)) {
_pendingConnections.add(con);
}
_pendingConnections.notifyAll();
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("wantsRead: " + con + " already live? " + already);
}
public void connectionClosed(NTCPConnection con) {
synchronized (_pendingConnections) {
_readAfterLive.remove(con);
_pendingConnections.remove(con);
_pendingConnections.notifyAll();
}
}
private class Runner implements Runnable {
private boolean _stop;
public Runner() { _stop = false; }
public void stop() { _stop = true; }
public void run() {
if (_log.shouldLog(Log.INFO)) _log.info("Starting reader");
NTCPConnection con = null;
while (!_stop) {
try {
synchronized (_pendingConnections) {
boolean keepReading = (con != null) && _readAfterLive.remove(con);
if (keepReading) {
// keep on reading the same one
} else {
_liveReads.remove(con);
con = null;
if (_pendingConnections.size() <= 0) {
_pendingConnections.wait();
} else {
con = (NTCPConnection)_pendingConnections.remove(0);
_liveReads.add(con);
}
}
}
} catch (InterruptedException ie) {}
if (!_stop && (con != null) ) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("begin read for " + con);
try {
processRead(con);
} catch (RuntimeException re) {
_log.log(Log.CRIT, "Error in the ntcp reader", re);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("end read for " + con);
}
}
if (_log.shouldLog(Log.INFO)) _log.info("Stopping reader");
}
}
/**
* process everything read
*/
private void processRead(NTCPConnection con) {
if (con.isClosed())
return;
ByteBuffer buf = null;
while (!con.isClosed() && !con.isEstablished() && ( (buf = con.getNextReadBuf()) != null) ) {
EstablishState est = con.getEstablishState();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Processing read buffer as an establishment for " + con + " with [" + est + "]");
if (est == null) {
if (!con.isEstablished()) {
// establish state is only removed when the connection is fully established,
// yet if that happens, con.isEstablished() should return true...
throw new RuntimeException("connection was not established, yet the establish state is null for " + con);
} else {
// hmm, there shouldn't be a race here - only one reader should
// be running on a con at a time...
if (_log.shouldLog(Log.ERROR))
_log.error("no establishment state but " + con + " is established... race?");
break;
}
}
if (est.isComplete()) {
// why is it complete yet !con.isEstablished?
if (_log.shouldLog(Log.ERROR))
_log.error("establishment state [" + est + "] is complete, yet the connection isn't established? "
+ con.isEstablished() + " (inbound? " + con.isInbound() + " " + con + ")");
break;
}
est.receive(buf);
if (est.isCorrupt()) {
if (_log.shouldLog(Log.WARN))
_log.warn("closing connection on establishment because: " +est.getError(), est.getException());
con.close();
return;
} else if (buf.remaining() <= 0) {
con.removeReadBuf(buf);
}
}
while (!con.isClosed() && (buf = con.getNextReadBuf()) != null) {
// decrypt the data and push it into an i2np message
if (_log.shouldLog(Log.DEBUG))
_log.debug("Processing read buffer as part of an i2np message (" + buf.remaining() + " bytes)");
con.recvEncryptedI2NP(buf);
con.removeReadBuf(buf);
}
}
}
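
processRead() above drains buffers that something must have queued on the connection from the selector thread. NTCPConnection itself is in the suppressed diff, so the hand-off below is only an assumed sketch of its recv() method, kept consistent with the pumper's con.recv(rbuf) call and with the getNextReadBuf()/removeReadBuf() calls this Reader makes; _readBufs and _transport are invented names.

// Assumed sketch of NTCPConnection.recv() (the real class's diff is suppressed in this commit):
// queue the ciphertext buffer and wake a "NTCP read N" thread, which will drain it later
// through getNextReadBuf()/removeReadBuf() as shown in processRead() above.
public void recv(ByteBuffer buf) {
    synchronized (_readBufs) {                   // _readBufs: invented name for the pending-buffer list
        _readBufs.add(buf);
    }
    _transport.getReader().wantsRead(this);      // _transport: invented field holding the NTCPTransport
}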

View File

@ -0,0 +1,113 @@
package net.i2p.router.transport.ntcp;
import java.nio.ByteBuffer;
import java.util.*;
import net.i2p.router.RouterContext;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
/**
* Pool of running threads which will transform the next I2NP message into
* something ready to be transferred over an NTCP connection, including the
* encryption of the data to be written.
*
*/
class Writer {
private RouterContext _context;
private Log _log;
private List _pendingConnections;
private List _liveWrites;
private List _writeAfterLive;
private List _runners;
public Writer(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(getClass());
_pendingConnections = new ArrayList(16);
_runners = new ArrayList(5);
_liveWrites = new ArrayList(5);
_writeAfterLive = new ArrayList(5);
}
public void startWriting(int numWriters) {
for (int i = 0; i < numWriters; i++) {
Runner r = new Runner();
I2PThread t = new I2PThread(r, "NTCP write " + i, true);
_runners.add(r);
t.start();
}
}
public void stopWriting() {
while (_runners.size() > 0) {
Runner r = (Runner)_runners.remove(0);
r.stop();
}
synchronized (_pendingConnections) {
_writeAfterLive.clear();
_pendingConnections.notifyAll();
}
}
public void wantsWrite(NTCPConnection con) {
//if (con.getCurrentOutbound() != null)
// throw new RuntimeException("Current outbound message already in play on " + con);
boolean already = false;
synchronized (_pendingConnections) {
if (_liveWrites.contains(con)) {
if (!_writeAfterLive.contains(con)) {
_writeAfterLive.add(con);
}
already = true;
} else if (!_pendingConnections.contains(con)) {
_pendingConnections.add(con);
}
_pendingConnections.notifyAll();
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("wantsWrite: " + con + " already live? " + already);
}
public void connectionClosed(NTCPConnection con) {
synchronized (_pendingConnections) {
_writeAfterLive.remove(con);
_pendingConnections.remove(con);
_pendingConnections.notifyAll();
}
}
private class Runner implements Runnable {
private boolean _stop;
public Runner() { _stop = false; }
public void stop() { _stop = true; }
public void run() {
if (_log.shouldLog(Log.INFO)) _log.info("Starting writer");
NTCPConnection con = null;
while (!_stop) {
try {
synchronized (_pendingConnections) {
boolean keepWriting = (con != null) && _writeAfterLive.remove(con);
if (keepWriting) {
// keep on writing the same one
} else {
_liveWrites.remove(con);
con = null;
if (_pendingConnections.size() <= 0) {
_pendingConnections.wait();
} else {
con = (NTCPConnection)_pendingConnections.remove(0);
_liveWrites.add(con);
}
}
}
} catch (InterruptedException ie) {}
if (!_stop && (con != null)) {
try {
con.prepareNextWrite();
} catch (RuntimeException re) {
_log.log(Log.CRIT, "Error in the ntcp writer", re);
}
}
}
if (_log.shouldLog(Log.INFO)) _log.info("Stopping writer");
}
}
}
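
On the outbound side the same division of labor applies: NTCPTransport only enqueues (con.send(msg) "doesn't do anything yet, just enqueues it"), a Writer runner does the framing and encryption in prepareNextWrite(), and the EventPumper pushes the resulting buffers out in processWrite(). The send() sketch below is again an assumption about NTCPConnection (its diff is suppressed), kept consistent with the wantsWrite()/prepareNextWrite()/getNextWriteBuf() calls shown in this commit; _outbound and _transport are invented names.

// Assumed outbound pipeline, stitched together from the pieces shown in this commit:
//   NTCPTransport.outboundMessageReady() -> con.send(msg)           (enqueue only)
//   Writer.Runner                        -> con.prepareNextWrite()  (frame/encrypt off the selector thread)
//   EventPumper.processWrite()           -> con.getNextWriteBuf()   (push the bytes to the channel)
// send() below is a sketch of the first hop only.
public void send(OutNetMessage msg) {
    synchronized (_outbound) {                   // _outbound: invented name for the unsent-message queue
        _outbound.add(msg);
    }
    _transport.getWriter().wantsWrite(this);     // wakes a "NTCP write N" thread
}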

View File

@ -299,7 +299,7 @@ public class TCPTransport extends TransportImpl {
}
}
_context.shitlist().unshitlistRouter(ident.calculateHash());
_context.shitlist().unshitlistRouter(ident.calculateHash(), STYLE);
con.runConnection();
if (_log.shouldLog(Log.DEBUG))

View File

@ -533,7 +533,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
_log.warn("Peer already connected: old=" + oldPeer + " new=" + peer, new Exception("dup"));
_activeThrottle.unchoke(peer.getRemotePeer());
_context.shitlist().unshitlistRouter(peer.getRemotePeer());
_context.shitlist().unshitlistRouter(peer.getRemotePeer(), STYLE);
if (SHOULD_FLOOD_PEERS)
_flooder.addPeer(peer);

View File

@ -65,19 +65,35 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
}
public boolean preprocessQueue(List pending, TunnelGateway.Sender sender, TunnelGateway.Receiver rec) {
if (_log.shouldLog(Log.DEBUG))
StringBuffer timingBuf = null;
if (_log.shouldLog(Log.DEBUG)) {
_log.debug("Preprocess queue with " + pending.size() + " to send");
timingBuf = new StringBuffer(128);
timingBuf.append("Preprocess with " + pending.size() + " to send. ");
}
if (DISABLE_BATCHING) {
if (_log.shouldLog(Log.INFO))
_log.info("Disabled batching, pushing " + pending + " immediately");
return super.preprocessQueue(pending, sender, rec);
}
long start = System.currentTimeMillis();
int batchCount = 0;
int beforeLooping = pending.size();
while (pending.size() > 0) {
int allocated = 0;
long beforePendingLoop = System.currentTimeMillis();
for (int i = 0; i < pending.size(); i++) {
long pendingStart = System.currentTimeMillis();
TunnelGateway.Pending msg = (TunnelGateway.Pending)pending.get(i);
int instructionsSize = getInstructionsSize(msg);
instructionsSize += getInstructionAugmentationSize(msg, allocated, instructionsSize);
int curWanted = msg.getData().length - msg.getOffset() + instructionsSize;
if (_log.shouldLog(Log.DEBUG))
_log.debug("pending " + i + "/" +pending.size()
+ " (" + msg.getMessageId() + ") curWanted=" + curWanted
+ " instructionSize=" + instructionsSize + " allocated=" + allocated);
allocated += curWanted;
if (allocated >= FULL_SIZE) {
if (allocated - curWanted + instructionsSize >= FULL_SIZE) {
@ -93,8 +109,10 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
long waited = _context.clock().now() - _pendingSince;
_context.statManager().addRateData("tunnel.batchDelaySent", pending.size(), waited);
}
long beforeSend = System.currentTimeMillis();
_pendingSince = 0;
send(pending, 0, i, sender, rec);
long afterSend = System.currentTimeMillis();
if (_log.shouldLog(Log.INFO))
_log.info("Allocated=" + allocated + " so we sent " + (i+1)
+ " (last complete? " + (msg.getOffset() >= msg.getData().length)
@ -105,26 +123,40 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
if (cur.getOffset() < cur.getData().length)
throw new IllegalArgumentException("i=" + i + " j=" + j + " off=" + cur.getOffset()
+ " len=" + cur.getData().length + " alloc=" + allocated);
if (timingBuf != null)
timingBuf.append(" sent " + cur);
notePreprocessing(cur.getMessageId(), cur.getFragmentNumber(), cur.getData().length, cur.getMessageIds(), "flushed allocated");
_context.statManager().addRateData("tunnel.writeDelay", cur.getLifetime(), cur.getData().length);
}
if (msg.getOffset() >= msg.getData().length) {
// ok, this last message fit perfectly, remove it too
TunnelGateway.Pending cur = (TunnelGateway.Pending)pending.remove(0);
if (timingBuf != null)
timingBuf.append(" sent perfect fit " + cur).append(".");
notePreprocessing(cur.getMessageId(), cur.getFragmentNumber(), msg.getData().length, msg.getMessageIds(), "flushed tail, remaining: " + pending);
_context.statManager().addRateData("tunnel.writeDelay", cur.getLifetime(), cur.getData().length);
}
if (i > 0)
_context.statManager().addRateData("tunnel.batchMultipleCount", i+1, 0);
allocated = 0;
// don't break - we may have enough source messages for multiple full tunnel messages
//break;
batchCount++;
long pendingEnd = System.currentTimeMillis();
if (timingBuf != null)
timingBuf.append(" After sending " + (i+1) + "/"+pending.size() +" in " + (afterSend-beforeSend)
+ " after " + (beforeSend-pendingStart)
+ " since " + (beforeSend-beforePendingLoop)
+ "/" + (beforeSend-start)
+ " pending current " + (pendingEnd-pendingStart)).append(".");
break;
}
if (timingBuf != null)
timingBuf.append(" After pending loop " + (System.currentTimeMillis()-beforePendingLoop)).append(".");
}
display(allocated, pending, "after looping to clear " + (beforeLooping - pending.size()));
long afterCleared = System.currentTimeMillis();
if (_log.shouldLog(Log.INFO))
display(allocated, pending, "after looping to clear " + (beforeLooping - pending.size()));
long afterDisplayed = System.currentTimeMillis();
if (allocated > 0) {
// after going through the entire pending list, we still don't
// have enough data to send a full message
@ -152,13 +184,26 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
_pendingSince = _context.clock().now();
_context.statManager().addRateData("tunnel.batchFlushRemaining", pending.size(), beforeSize);
display(allocated, pending, "flushed, some remain");
if (timingBuf != null) {
timingBuf.append(" flushed, some remain (displayed to now: " + (System.currentTimeMillis()-afterDisplayed) + ")");
timingBuf.append(" total time: " + (System.currentTimeMillis()-start));
_log.debug(timingBuf.toString());
}
return true;
} else {
long delayAmount = _context.clock().now() - _pendingSince;
_pendingSince = 0;
if (batchCount > 1)
_context.statManager().addRateData("tunnel.batchCount", batchCount, 0);
display(allocated, pending, "flushed " + (beforeSize) + ", no remaining after " + delayAmount);
if (_log.shouldLog(Log.INFO))
display(allocated, pending, "flushed " + (beforeSize) + ", no remaining after " + delayAmount);
if (timingBuf != null) {
timingBuf.append(" flushed, none remain (displayed to now: " + (System.currentTimeMillis()-afterDisplayed) + ")");
timingBuf.append(" total time: " + (System.currentTimeMillis()-start));
_log.debug(timingBuf.toString());
}
return false;
}
} else {
@ -169,17 +214,31 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
_context.statManager().addRateData("tunnel.batchCount", batchCount, 0);
// not yet time to send the delayed flush
display(allocated, pending, "dont flush");
if (timingBuf != null) {
timingBuf.append(" dont flush (displayed to now: " + (System.currentTimeMillis()-afterDisplayed) + ")");
timingBuf.append(" total time: " + (System.currentTimeMillis()-start));
_log.debug(timingBuf.toString());
}
return true;
}
} else {
// ok, we sent some, but haven't gone back for another
// pass yet. keep looping
if (timingBuf != null)
timingBuf.append(" Keep looping");
}
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sent everything on the list (pending=" + pending.size() + ")");
if (timingBuf != null)
timingBuf.append(" total time: " + (System.currentTimeMillis()-start));
if (timingBuf != null)
_log.debug(timingBuf.toString());
// sent everything from the pending list, no need for a delayed flush
return false;
}
@ -244,6 +303,8 @@ public class BatchedPreprocessor extends TrivialPreprocessor {
TunnelGateway.Pending cur = (TunnelGateway.Pending)pending.get(i);
cur.addMessageId(msgId);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sent " + startAt + ":" + sendThrough + " out of " + pending + " in message " + msgId);
}
/**

View File

@ -32,12 +32,22 @@ public class BuildMessageProcessor {
Log log = ctx.logManager().getLog(getClass());
BuildRequestRecord rv = null;
int ourHop = -1;
long beforeActualDecrypt = 0;
long afterActualDecrypt = 0;
long totalEq = 0;
long totalDup = 0;
long beforeLoop = System.currentTimeMillis();
for (int i = 0; i < TunnelBuildMessage.RECORD_COUNT; i++) {
ByteArray rec = msg.getRecord(i);
int off = rec.getOffset();
int len = BuildRequestRecord.PEER_SIZE;
if (DataHelper.eq(ourHash.getData(), 0, rec.getData(), off, len)) {
long beforeEq = System.currentTimeMillis();
boolean eq = DataHelper.eq(ourHash.getData(), 0, rec.getData(), off, len);
totalEq += System.currentTimeMillis()-beforeEq;
if (eq) {
long beforeIsDup = System.currentTimeMillis();
boolean isDup = _filter.add(rec.getData(), off + len, 32);
totalDup += System.currentTimeMillis()-beforeIsDup;
if (isDup) {
if (log.shouldLog(Log.WARN))
log.warn(msg.getUniqueId() + ": A record matching our hash was found, but it seems to be a duplicate");
@ -45,7 +55,9 @@ public class BuildMessageProcessor {
return null;
}
BuildRequestRecord req = new BuildRequestRecord();
beforeActualDecrypt = System.currentTimeMillis();
boolean ok = req.decryptRecord(ctx, privKey, ourHash, rec);
afterActualDecrypt = System.currentTimeMillis();
if (ok) {
if (log.shouldLog(Log.DEBUG))
log.debug(msg.getUniqueId() + ": A record matching our hash was found and decrypted");
@ -64,6 +76,8 @@ public class BuildMessageProcessor {
log.debug(msg.getUniqueId() + ": No records matching our hash were found");
return null;
}
long beforeEncrypt = System.currentTimeMillis();
SessionKey replyKey = rv.readReplyKey();
byte iv[] = rv.readReplyIV();
int ivOff = 0;
@ -76,7 +90,17 @@ public class BuildMessageProcessor {
iv, ivOff, data.getValid());
}
}
long afterEncrypt = System.currentTimeMillis();
msg.setRecord(ourHop, null);
if (afterEncrypt-beforeLoop > 1000) {
if (log.shouldLog(Log.WARN))
log.warn("Slow decryption, total=" + (afterEncrypt-beforeLoop)
+ " looping=" + (beforeEncrypt-beforeLoop)
+ " decrypt=" + (afterActualDecrypt-beforeActualDecrypt)
+ " eq=" + totalEq
+ " dup=" + totalDup
+ " encrypt=" + (afterEncrypt-beforeEncrypt));
}
return rv;
}
}

View File

@ -40,8 +40,8 @@ class OutboundReceiver implements TunnelGateway.Receiver {
send(msg, ri);
return msg.getUniqueId();
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("lookup of " + _config.getPeer(1).toBase64().substring(0,4)
if (_log.shouldLog(Log.ERROR))
_log.error("lookup of " + _config.getPeer(1).toBase64().substring(0,4)
+ " required for " + msg);
_context.netDb().lookupRouterInfo(_config.getPeer(1), new SendJob(_context, msg), new FailedJob(_context), 10*1000);
return -1;
@ -50,7 +50,7 @@ class OutboundReceiver implements TunnelGateway.Receiver {
private void send(TunnelDataMessage msg, RouterInfo ri) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("forwarding encrypted data out " + _config + ": " + msg);
_log.debug("forwarding encrypted data out " + _config + ": " + msg.getUniqueId());
OutNetMessage m = new OutNetMessage(_context);
m.setMessage(msg);
m.setExpiration(msg.getMessageExpiration());

View File

@ -0,0 +1,145 @@
package net.i2p.router.tunnel;
import java.util.ArrayList;
import java.util.List;
import net.i2p.I2PAppContext;
import net.i2p.data.Hash;
import net.i2p.data.TunnelId;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.Router;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer;
/**
* Serve as the gatekeeper for a tunnel, accepting messages, coalescing and/or
* fragmenting them before wrapping them up for tunnel delivery. The flow here
* is: <ol>
* <li>add an I2NPMessage (and a target tunnel/router, if necessary)</li>
* <li>that message is queued up into a TunnelGateway.Pending and offered to the
* assigned QueuePreprocessor.</li>
* <li>that QueuePreprocessor may then take off any of the TunnelGateway.Pending
* messages or instruct the TunnelGateway to offer it the messages again in
* a short while (in an attempt to coalesce them).</li>
* <li>when the QueuePreprocessor accepts a TunnelGateway.Pending, it preprocesses
* it into fragments, forwarding each preprocessed fragment group through
* the Sender.</li>
* <li>the Sender then encrypts the preprocessed data and delivers it to the
* Receiver.</li>
* <li>the Receiver now has the encrypted message and may do with it as it
* pleases (e.g. wrap it as necessary and enqueue it onto the OutNetMessagePool,
* or if debugging, verify that it can be decrypted properly)</li>
* </ol>
*
*/
public class PumpedTunnelGateway extends TunnelGateway {
private List _prequeue;
private TunnelGatewayPumper _pumper;
/**
* @param preprocessor this pulls Pending messages off a list, builds some
* full preprocessed messages, and pumps those into the sender
* @param sender this takes a preprocessed message, encrypts it, and sends it to
* the receiver
* @param receiver this receives the encrypted message and forwards it off
* to the first hop
*/
public PumpedTunnelGateway(I2PAppContext context, QueuePreprocessor preprocessor, Sender sender, Receiver receiver, TunnelGatewayPumper pumper) {
super(context, preprocessor, sender, receiver);
_prequeue = new ArrayList(4);
_pumper = pumper;
}
/**
* Add a message to be sent down the tunnel, either sending it now (perhaps
* coalesced with other pending messages) or after a brief pause (_flushFrequency).
* If it is queued up past its expiration, it is silently dropped
*
* @param msg message to be sent through the tunnel
* @param toRouter router to send to after the endpoint (or null for endpoint processing)
* @param toTunnel tunnel to send to after the endpoint (or null for endpoint or router processing)
*/
public void add(I2NPMessage msg, Hash toRouter, TunnelId toTunnel) {
_messagesSent++;
long startAdd = System.currentTimeMillis();
Pending cur = new PendingImpl(msg, toRouter, toTunnel);
long beforeLock = System.currentTimeMillis();
long afterAdded = -1;
synchronized (_prequeue) {
_prequeue.add(cur);
afterAdded = System.currentTimeMillis();
}
_pumper.wantsPumping(this);
if (_log.shouldLog(Log.DEBUG))
_log.debug("GW prequeue time: " + (System.currentTimeMillis()-beforeLock) + " for " + msg.getUniqueId() + " on " + toString());
}
/**
* run in one of the TunnelGatewayPumper's threads, this pulls pending messages
* off the prequeue, adds them to the queue and then tries to preprocess the queue,
* scheduling a later delayed flush as necessary. this allows the gw.add call to
* go quickly, rather than blocking its callers on potentially substantial
* processing.
*/
void pump(List queueBuf) {
synchronized (_prequeue) {
if (_prequeue.size() > 0) {
queueBuf.addAll(_prequeue);
_prequeue.clear();
} else {
return;
}
}
long startAdd = System.currentTimeMillis();
long beforeLock = System.currentTimeMillis();
long afterAdded = -1;
boolean delayedFlush = false;
long delayAmount = -1;
int remaining = 0;
long afterPreprocess = 0;
long afterExpire = 0;
synchronized (_queue) {
_queue.addAll(queueBuf);
afterAdded = System.currentTimeMillis();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Added before direct flush preprocessing for " + toString() + ": " + _queue);
delayedFlush = _preprocessor.preprocessQueue(_queue, _sender, _receiver);
afterPreprocess = System.currentTimeMillis();
if (delayedFlush)
delayAmount = _preprocessor.getDelayAmount();
_lastFlush = _context.clock().now();
// expire any as necessary, even if it's fragmented
for (int i = 0; i < _queue.size(); i++) {
Pending m = (Pending)_queue.get(i);
if (m.getExpiration() + Router.CLOCK_FUDGE_FACTOR < _lastFlush) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Expire on the queue (size=" + _queue.size() + "): " + m);
_queue.remove(i);
i--;
}
}
afterExpire = System.currentTimeMillis();
remaining = _queue.size();
if ( (remaining > 0) && (_log.shouldLog(Log.DEBUG)) )
_log.debug("Remaining after preprocessing: " + _queue);
}
if (delayedFlush) {
FlushTimer.getInstance().addEvent(_delayedFlush, delayAmount);
}
_context.statManager().addRateData("tunnel.lockedGatewayAdd", afterAdded-beforeLock, remaining);
long complete = System.currentTimeMillis();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Time to add " + queueBuf.size() + " messages to " + toString() + ": " + (complete-startAdd)
+ " delayed? " + delayedFlush + " remaining: " + remaining
+ " prepare: " + (beforeLock-startAdd)
+ " add: " + (afterAdded-beforeLock)
+ " preprocess: " + (afterPreprocess-afterAdded)
+ " expire: " + (afterExpire-afterPreprocess)
+ " queue flush: " + (complete-afterExpire));
queueBuf.clear();
}
}
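
The point of the class above is that add() only prequeues the message and pokes the shared pumper; the expensive preprocessQueue() work then happens on a "GW pumper" thread inside pump(), so callers of add() return quickly instead of holding the gateway lock through preprocessing. The fragment below restates the wiring that appears in the TunnelDispatcher changes later in this commit; ctx, cfg and msg stand in for the router context, tunnel configuration and I2NP message the caller already has, and createPreprocessor() is the dispatcher's own helper.

// Usage as wired in TunnelDispatcher later in this commit (ctx, cfg and msg assumed given)
TunnelGatewayPumper pumper = new TunnelGatewayPumper(ctx);               // starts 4 "GW pumper" threads
TunnelGateway.QueuePreprocessor preproc = createPreprocessor(cfg);       // as in TunnelDispatcher
TunnelGateway.Sender sender = new OutboundSender(ctx, cfg);
TunnelGateway.Receiver receiver = new OutboundReceiver(ctx, cfg);
TunnelGateway gw = new PumpedTunnelGateway(ctx, preproc, sender, receiver, pumper);
gw.add(msg, null, null);   // returns after prequeue + wantsPumping(); a pumper thread later calls pump()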

View File

@ -24,7 +24,7 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
public static final int PREPROCESSED_SIZE = 1024;
protected static final int IV_SIZE = HopProcessor.IV_LENGTH;
protected static final ByteCache _dataCache = ByteCache.getInstance(512, PREPROCESSED_SIZE);
protected static final ByteCache _dataCache = ByteCache.getInstance(32, PREPROCESSED_SIZE);
protected static final ByteCache _ivCache = ByteCache.getInstance(128, IV_SIZE);
protected static final ByteCache _hashCache = ByteCache.getInstance(128, Hash.HASH_LENGTH);
@ -42,14 +42,30 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
*
*/
public boolean preprocessQueue(List pending, TunnelGateway.Sender sender, TunnelGateway.Receiver rec) {
long begin = System.currentTimeMillis();
StringBuffer buf = null;
if (_log.shouldLog(Log.DEBUG)) {
buf = new StringBuffer(256);
buf.append("Trivial preprocessing of ").append(pending.size()).append(" ");
}
while (pending.size() > 0) {
TunnelGateway.Pending msg = (TunnelGateway.Pending)pending.remove(0);
long beforePreproc = System.currentTimeMillis();
byte preprocessed[][] = preprocess(msg);
long afterPreproc = System.currentTimeMillis();
if (buf != null)
buf.append("preprocessed into " + preprocessed.length + " fragments after " + (afterPreproc-beforePreproc) + ". ");
for (int i = 0; i < preprocessed.length; i++) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Preprocessed: fragment " + i + "/" + (preprocessed.length-1) + " in "
+ msg.getMessageId() + ": " + Base64.encode(preprocessed[i]));
+ msg.getMessageId() + ": "
+ " send through " + sender + " receive with " + rec);
//Base64.encode(preprocessed[i]));
long beforeSend = System.currentTimeMillis();
long id = sender.sendPreprocessed(preprocessed[i], rec);
long afterSend = System.currentTimeMillis();
if (buf != null)
buf.append("send of " + msg.getMessageId() + " took " + (afterSend-beforeSend) + ". ");
msg.addMessageId(id);
}
notePreprocessing(msg.getMessageId(), msg.getFragmentNumber(), preprocessed.length, msg.getMessageIds(), null);
@ -58,6 +74,12 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
+ msg.getFragmentNumber() + "/" + preprocessed.length + " fragments, size = "
+ msg.getData().length);
}
if (buf != null)
buf.append("all fragments sent after " + (System.currentTimeMillis()-afterPreproc) + ". ");
}
if (buf != null) {
buf.append("queue preprocessed after " + (System.currentTimeMillis()-begin) + ".");
_log.debug(buf.toString());
}
return false;
}
@ -69,8 +91,8 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
while (msg.getOffset() < msg.getData().length) {
fragments.add(preprocessFragment(msg));
if (_log.shouldLog(Log.DEBUG))
_log.debug("\n\nafter preprocessing fragment\n\n");
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("\n\nafter preprocessing fragment\n\n");
}
byte rv[][] = new byte[fragments.size()][];
@ -236,7 +258,8 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
if (_log.shouldLog(Log.DEBUG))
_log.debug("initial fragment[" + msg.getMessageId() + "/" + msg.getFragmentNumber()+ "/"
+ (PREPROCESSED_SIZE - offset - payloadLength) + "/" + payloadLength + "]: "
+ Base64.encode(target, offset, payloadLength));
);
//+ Base64.encode(target, offset, payloadLength));
offset += payloadLength;
@ -277,7 +300,8 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
if (_log.shouldLog(Log.DEBUG))
_log.debug("subsequent fragment[" + msg.getMessageId() + "/" + msg.getFragmentNumber()+ "/"
+ offset + "/" + payloadLength + "]: "
+ Base64.encode(target, offset, payloadLength));
);
//+ Base64.encode(target, offset, payloadLength));
offset += payloadLength;

View File

@ -40,6 +40,7 @@ public class TunnelDispatcher implements Service {
private LeaveTunnel _leaveJob;
/** what is the date/time we last deliberately dropped a tunnel? **/
private long _lastDropTime;
private TunnelGatewayPumper _pumper;
/** Creates a new instance of TunnelDispatcher */
public TunnelDispatcher(RouterContext ctx) {
@ -53,6 +54,7 @@ public class TunnelDispatcher implements Service {
_lastParticipatingExpiration = 0;
_lastDropTime = 0;
_validator = null;
_pumper = new TunnelGatewayPumper(ctx);
_leaveJob = new LeaveTunnel(ctx);
ctx.statManager().createRateStat("tunnel.participatingTunnels",
"How many tunnels are we participating in?", "Tunnels",
@ -142,7 +144,8 @@ public class TunnelDispatcher implements Service {
TunnelGateway.QueuePreprocessor preproc = createPreprocessor(cfg);
TunnelGateway.Sender sender = new OutboundSender(_context, cfg);
TunnelGateway.Receiver receiver = new OutboundReceiver(_context, cfg);
TunnelGateway gw = new TunnelGateway(_context, preproc, sender, receiver);
//TunnelGateway gw = new TunnelGateway(_context, preproc, sender, receiver);
TunnelGateway gw = new PumpedTunnelGateway(_context, preproc, sender, receiver, _pumper);
TunnelId outId = cfg.getConfig(0).getSendTunnel();
synchronized (_outboundGateways) {
_outboundGateways.put(outId, gw);
@ -245,7 +248,8 @@ public class TunnelDispatcher implements Service {
TunnelGateway.QueuePreprocessor preproc = createPreprocessor(cfg);
TunnelGateway.Sender sender = new InboundSender(_context, cfg);
TunnelGateway.Receiver receiver = new InboundGatewayReceiver(_context, cfg);
TunnelGateway gw = new TunnelGateway(_context, preproc, sender, receiver);
//TunnelGateway gw = new TunnelGateway(_context, preproc, sender, receiver);
TunnelGateway gw = new PumpedTunnelGateway(_context, preproc, sender, receiver, _pumper);
TunnelId recvId = cfg.getReceiveTunnel();
synchronized (_inboundGateways) {
_inboundGateways.put(recvId, gw);
@ -367,7 +371,7 @@ public class TunnelDispatcher implements Service {
*
*/
public void dispatch(TunnelDataMessage msg, Hash recvFrom) {
long before = _context.clock().now();
long before = System.currentTimeMillis();
TunnelParticipant participant = null;
synchronized (_participants) {
participant = (TunnelParticipant)_participants.get(msg.getTunnelIdObj());
@ -404,7 +408,9 @@ public class TunnelDispatcher implements Service {
}
}
long dispatchTime = _context.clock().now() - before;
long dispatchTime = System.currentTimeMillis() - before;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Dispatch data time: " + dispatchTime + " participant? " + participant);
_context.statManager().addRateData("tunnel.dispatchDataTime", dispatchTime, dispatchTime);
}
@ -414,7 +420,7 @@ public class TunnelDispatcher implements Service {
*
*/
public void dispatch(TunnelGatewayMessage msg) {
long before = _context.clock().now();
long before = System.currentTimeMillis();
TunnelGateway gw = null;
synchronized (_inboundGateways) {
gw = (TunnelGateway)_inboundGateways.get(msg.getTunnelId());
@ -451,7 +457,10 @@ public class TunnelDispatcher implements Service {
+ " existing = " + _inboundGateways.size(), new Exception("source"));
}
long dispatchTime = _context.clock().now() - before;
long dispatchTime = System.currentTimeMillis() - before;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Dispatch in gw time: " + dispatchTime + " gateway? " + gw);
_context.statManager().addRateData("tunnel.dispatchGatewayTime", dispatchTime, dispatchTime);
}
@ -519,6 +528,9 @@ public class TunnelDispatcher implements Service {
}
long dispatchTime = _context.clock().now() - before;
if (dispatchTime > 1000) {
_log.error("wtf, took " + dispatchTime + " to dispatch " + msg + " out " + outboundTunnel + " in " + gw);
}
if (gw instanceof TunnelGatewayZeroHop)
_context.statManager().addRateData("tunnel.dispatchOutboundZeroHopTime", dispatchTime, dispatchTime);
else
@ -603,6 +615,7 @@ public class TunnelDispatcher implements Service {
if (_validator != null)
_validator.destroy();
_validator = null;
_pumper.stopPumping();
}
public void restart() {
shutdown();

View File

@ -34,16 +34,16 @@ import net.i2p.util.SimpleTimer;
*
*/
public class TunnelGateway {
private I2PAppContext _context;
private Log _log;
private List _queue;
private QueuePreprocessor _preprocessor;
private Sender _sender;
private Receiver _receiver;
private long _lastFlush;
private int _flushFrequency;
private DelayedFlush _delayedFlush;
private int _messagesSent;
protected I2PAppContext _context;
protected Log _log;
protected List _queue;
protected QueuePreprocessor _preprocessor;
protected Sender _sender;
protected Receiver _receiver;
protected long _lastFlush;
protected int _flushFrequency;
protected DelayedFlush _delayedFlush;
protected int _messagesSent;
/**
* @param preprocessor this pulls Pending messages off a list, builds some
@ -55,7 +55,7 @@ public class TunnelGateway {
*/
public TunnelGateway(I2PAppContext context, QueuePreprocessor preprocessor, Sender sender, Receiver receiver) {
_context = context;
_log = context.logManager().getLog(TunnelGateway.class);
_log = context.logManager().getLog(getClass());
_queue = new ArrayList(4);
_preprocessor = preprocessor;
_sender = sender;
@ -88,19 +88,22 @@ public class TunnelGateway {
*/
public void add(I2NPMessage msg, Hash toRouter, TunnelId toTunnel) {
_messagesSent++;
long startAdd = System.currentTimeMillis();
boolean delayedFlush = false;
long delayAmount = -1;
int remaining = 0;
long beforeLock = _context.clock().now();
long afterAdded = -1;
Pending cur = new PendingImpl(msg, toRouter, toTunnel);
long beforeLock = System.currentTimeMillis();
long afterAdded = -1;
long afterPreprocess = 0;
long afterExpire = 0;
synchronized (_queue) {
_queue.add(cur);
afterAdded = _context.clock().now();
afterAdded = System.currentTimeMillis();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Added before direct flush preprocessing: " + _queue);
delayedFlush = _preprocessor.preprocessQueue(_queue, _sender, _receiver);
afterPreprocess = System.currentTimeMillis();
if (delayedFlush)
delayAmount = _preprocessor.getDelayAmount();
_lastFlush = _context.clock().now();
@ -115,6 +118,7 @@ public class TunnelGateway {
i--;
}
}
afterExpire = System.currentTimeMillis();
remaining = _queue.size();
if ( (remaining > 0) && (_log.shouldLog(Log.DEBUG)) )
_log.debug("Remaining after preprocessing: " + _queue);
@ -124,6 +128,15 @@ public class TunnelGateway {
FlushTimer.getInstance().addEvent(_delayedFlush, delayAmount);
}
_context.statManager().addRateData("tunnel.lockedGatewayAdd", afterAdded-beforeLock, remaining);
long complete = System.currentTimeMillis();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Time to add the message " + msg.getUniqueId() + ": " + (complete-startAdd)
+ " delayed? " + delayedFlush + " remaining: " + remaining
+ " prepare: " + (beforeLock-startAdd)
+ " add: " + (afterAdded-beforeLock)
+ " preprocess: " + (afterPreprocess-afterAdded)
+ " expire: " + (afterExpire-afterPreprocess)
+ " queue flush: " + (complete-afterExpire));
}
public int getMessagesSent() { return _messagesSent; }
@ -163,7 +176,7 @@ public class TunnelGateway {
public static class Pending {
protected Hash _toRouter;
protected TunnelId _toTunnel;
private long _messageId;
protected long _messageId;
protected long _expiration;
protected byte _remaining[];
protected int _offset;
@ -218,14 +231,14 @@ public class TunnelGateway {
}
}
}
private class PendingImpl extends Pending {
class PendingImpl extends Pending {
public PendingImpl(I2NPMessage message, Hash toRouter, TunnelId toTunnel) {
super(message, toRouter, toTunnel, _context.clock().now());
}
public String toString() {
StringBuffer buf = new StringBuffer(64);
buf.append("Message on ");
buf.append("Message ").append(_messageId).append(" on ");
buf.append(TunnelGateway.this.toString());
if (_toRouter != null) {
buf.append(" targetting ");

View File

@ -0,0 +1,57 @@
package net.i2p.router.tunnel;
import java.util.*;
import net.i2p.router.RouterContext;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
/**
* run through the tunnel gateways that have had messages added to them and push
* those messages through the preprocessing and sending process
*/
public class TunnelGatewayPumper implements Runnable {
private RouterContext _context;
private Log _log;
private List _wantsPumping;
private boolean _stop;
/** Creates a new instance of TunnelGatewayPumper */
public TunnelGatewayPumper(RouterContext ctx) {
_context = ctx;
_log = ctx.logManager().getLog(getClass());
_wantsPumping = new ArrayList(64);
_stop = false;
for (int i = 0; i < 4; i++)
new I2PThread(this, "GW pumper " + i, true).start();
}
public void stopPumping() {
_stop=true;
synchronized (_wantsPumping) { _wantsPumping.notifyAll(); }
}
public void wantsPumping(PumpedTunnelGateway gw) {
synchronized (_wantsPumping) {
_wantsPumping.add(gw);
_wantsPumping.notify();
}
}
public void run() {
PumpedTunnelGateway gw = null;
List queueBuf = new ArrayList(32);
while (!_stop) {
try {
synchronized (_wantsPumping) {
if (_wantsPumping.size() > 0)
gw = (PumpedTunnelGateway)_wantsPumping.remove(0);
else
_wantsPumping.wait();
}
} catch (InterruptedException ie) {}
if (gw != null) {
gw.pump(queueBuf);
gw = null;
}
}
}
}
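TunnelGatewayPumper is a plain wait/notify work queue: callers hand a gateway to wantsPumping(), which wakes one of the four daemon threads, and that thread calls pump() with a reusable list so the heavy preprocessing runs off the dispatcher thread. PumpedTunnelGateway itself is not part of this excerpt, so the gateway side shown below is an assumption; PumpedGatewaySketch is a hypothetical stand-in and only the pumper's methods are taken from the class above.

    import java.util.ArrayList;
    import java.util.List;

    // Hedged sketch of the assumed gateway side of the handoff (hypothetical class name).
    class PumpedGatewaySketch {
        private final List _prequeue = new ArrayList(16);
        private final TunnelGatewayPumper _pumper;

        PumpedGatewaySketch(TunnelGatewayPumper pumper) { _pumper = pumper; }

        /** called on the dispatcher thread: enqueue cheaply and defer the expensive work */
        void add(Object pendingMessage) {
            synchronized (_prequeue) { _prequeue.add(pendingMessage); }
            // the real gateway would call _pumper.wantsPumping(this) here, which
            // requires an actual PumpedTunnelGateway rather than this sketch
        }

        /** called later on a pumper thread with a reusable buffer to limit allocation */
        void pump(List buf) {
            buf.clear();
            synchronized (_prequeue) { buf.addAll(_prequeue); _prequeue.clear(); }
            // ... run the preprocessor and sender over buf outside the lock ...
        }
    }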

View File

@ -102,8 +102,8 @@ class BuildExecutor implements Runnable {
}
}
if (buf != null)
_log.debug(buf.toString());
//if (buf != null)
// _log.debug(buf.toString());
_context.statManager().addRateData("tunnel.concurrentBuilds", concurrent, 0);
@ -217,15 +217,15 @@ class BuildExecutor implements Runnable {
// allowed() also expires timed out requests (for new style requests)
int allowed = allowed();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Allowed: " + allowed + " wanted: " + wanted);
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("Allowed: " + allowed + " wanted: " + wanted);
// zero hop ones can run inline
allowed = buildZeroHopTunnels(wanted, allowed);
afterBuildZeroHop = System.currentTimeMillis();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Zero hops built, Allowed: " + allowed + " wanted: " + wanted);
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("Zero hops built, Allowed: " + allowed + " wanted: " + wanted);
int realBuilt = 0;
TunnelManagerFacade mgr = _context.tunnelManager();
@ -243,10 +243,10 @@ class BuildExecutor implements Runnable {
if ( (allowed > 0) && (wanted.size() > 0) ) {
Collections.shuffle(wanted, _context.random());
// force the loops to be short, since 20 consecutive tunnel build requests can take
// force the loops to be short, since 3 consecutive tunnel build requests can take
// a long, long time
if (allowed > 5)
allowed = 5;
if (allowed > 2)
allowed = 2;
for (int i = 0; (i < allowed) && (wanted.size() > 0); i++) {
TunnelPool pool = (TunnelPool)wanted.remove(0);
@ -275,10 +275,10 @@ class BuildExecutor implements Runnable {
try {
synchronized (_currentlyBuilding) {
if (!_repoll) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Nothin' doin (allowed=" + allowed + ", wanted=" + wanted.size() + ", pending=" + pendingRemaining + "), wait for a while");
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("Nothin' doin (allowed=" + allowed + ", wanted=" + wanted.size() + ", pending=" + pendingRemaining + "), wait for a while");
//if (allowed <= 0)
_currentlyBuilding.wait(_context.random().nextInt(1*1000));
_currentlyBuilding.wait(_context.random().nextInt(2*1000));
//else // wanted <= 0
// _currentlyBuilding.wait(_context.random().nextInt(30*1000));
}
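Besides the lower cap on consecutive build requests, the loop above now parks on _currentlyBuilding for a random interval of up to two seconds when there is nothing to build, and a notify (for example when a build reply arrives) cuts the sleep short. A minimal sketch of that randomized wait-with-early-wakeup; BuildThrottleSketch and its method names are hypothetical, and the sketch adds one millisecond because Object.wait(0) blocks until notified rather than timing out.

    import java.util.Random;

    // Hedged sketch of the randomized backoff with early wakeup used in BuildExecutor (hypothetical names).
    class BuildThrottleSketch {
        private final Object _lock = new Object();

        /** park for up to ~2 seconds, unless somethingChanged() is called first */
        void waitForNextPass(Random random) throws InterruptedException {
            synchronized (_lock) {
                _lock.wait(random.nextInt(2 * 1000) + 1);   // +1 because wait(0) means "wait forever"
            }
        }

        /** e.g. a build reply arrived or a pending request expired */
        void somethingChanged() {
            synchronized (_lock) { _lock.notifyAll(); }
        }
    }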
@ -297,14 +297,14 @@ class BuildExecutor implements Runnable {
if (pendingRemaining > 0)
_context.statManager().addRateData("tunnel.pendingRemaining", pendingRemaining, afterHandleInbound-afterBuildReal);
if (_log.shouldLog(Log.INFO))
_log.info("build loop complete, tot=" + (afterHandleInbound-loopBegin) +
" inReply=" + (afterHandleInboundReplies-beforeHandleInboundReplies) +
" zeroHop=" + (afterBuildZeroHop-afterHandleInboundReplies) +
" real=" + (afterBuildReal-afterBuildZeroHop) +
" in=" + (afterHandleInbound-afterBuildReal) +
" built=" + realBuilt +
" pending=" + pendingRemaining);
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("build loop complete, tot=" + (afterHandleInbound-loopBegin) +
// " inReply=" + (afterHandleInboundReplies-beforeHandleInboundReplies) +
// " zeroHop=" + (afterBuildZeroHop-afterHandleInboundReplies) +
// " real=" + (afterBuildReal-afterBuildZeroHop) +
// " in=" + (afterHandleInbound-afterBuildReal) +
// " built=" + realBuilt +
// " pending=" + pendingRemaining);
wanted.clear();
pools.clear();

View File

@ -65,7 +65,7 @@ class BuildHandler {
ctx.inNetMessagePool().registerHandlerJobBuilder(TunnelBuildReplyMessage.MESSAGE_TYPE, new TunnelBuildReplyMessageHandlerJobBuilder());
}
private static final int MAX_HANDLE_AT_ONCE = 5;
private static final int MAX_HANDLE_AT_ONCE = 2;
private static final int NEXT_HOP_LOOKUP_TIMEOUT = 5*1000;
/**
@ -74,7 +74,9 @@ class BuildHandler {
*/
int handleInboundRequests() {
int dropExpired = 0;
int remaining = 0;
List handled = null;
long beforeFindHandled = System.currentTimeMillis();
synchronized (_inboundBuildMessages) {
int toHandle = _inboundBuildMessages.size();
if (toHandle > 0) {
@ -107,14 +109,18 @@ class BuildHandler {
handled.add(_inboundBuildMessages.remove(0));
}
}
remaining = _inboundBuildMessages.size();
}
if (handled != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Handling " + handled.size() + " requests");
_log.debug("Handling " + handled.size() + " requests (took " + (System.currentTimeMillis()-beforeFindHandled) + "ms to find them)");
for (int i = 0; i < handled.size(); i++) {
BuildMessageState state = (BuildMessageState)handled.get(i);
handleRequest(state);
long beforeHandle = System.currentTimeMillis();
long actualTime = handleRequest(state);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Handle took " + (System.currentTimeMillis()-beforeHandle) + "/" + actualTime + " (" + i + " out of " + handled.size() + " with " + remaining + " remaining)");
}
handled.clear();
}
@ -140,12 +146,15 @@ class BuildHandler {
}
// anything else?
/*
synchronized (_inboundBuildMessages) {
int remaining = _inboundBuildMessages.size();
if (remaining > 0)
_context.statManager().addRateData("tunnel.handleRemaining", remaining, 0);
return remaining;
}
*/
if (remaining > 0)
_context.statManager().addRateData("tunnel.handleRemaining", remaining, 0);
return remaining;
}
void handleInboundReplies() {
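The other change in this hunk is that the leftover count is snapshotted inside the synchronized block that already drains _inboundBuildMessages, instead of re-acquiring the lock afterwards just to read the size (the old block is left commented out). A minimal sketch of that snapshot-under-one-lock pattern; InboundQueueSketch and drainUpTo are hypothetical names.

    import java.util.ArrayList;
    import java.util.List;

    // Hedged sketch of the snapshot-under-one-lock pattern from handleInboundRequests() (hypothetical names).
    class InboundQueueSketch {
        private final List _inbound = new ArrayList();

        int drainUpTo(int max, List handled) {
            int remaining;
            synchronized (_inbound) {
                while (!_inbound.isEmpty() && handled.size() < max)
                    handled.add(_inbound.remove(0));
                remaining = _inbound.size();   // take the snapshot while we still hold the lock
            }
            // ... handle the drained requests and report 'remaining' without locking again ...
            return remaining;
        }
    }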
@ -273,7 +282,7 @@ class BuildHandler {
}
}
private void handleRequest(BuildMessageState state) {
private long handleRequest(BuildMessageState state) {
long timeSinceReceived = System.currentTimeMillis()-state.recvTime;
if (_log.shouldLog(Log.DEBUG))
_log.debug(state.msg.getUniqueId() + ": handling request after " + timeSinceReceived);
@ -284,7 +293,7 @@ class BuildHandler {
_log.warn("Not even trying to handle/decrypt the request " + state.msg.getUniqueId()
+ ", since we received it a long time ago: " + timeSinceReceived);
_context.statManager().addRateData("tunnel.dropLoadDelay", timeSinceReceived, 0);
return;
return -1;
}
// ok, this is not our own tunnel, so we need to do some heavy lifting
// this not only decrypts the current hop's record, but encrypts the other records
@ -293,24 +302,37 @@ class BuildHandler {
BuildRequestRecord req = _processor.decrypt(_context, state.msg, _context.routerHash(), _context.keyManager().getPrivateKey());
long decryptTime = System.currentTimeMillis() - beforeDecrypt;
_context.statManager().addRateData("tunnel.decryptRequestTime", decryptTime, decryptTime);
if (decryptTime > 500)
_log.warn("Took too long to decrypt the request: " + decryptTime + " for message " + state.msg.getUniqueId() + " received " + (timeSinceReceived+decryptTime) + " ago");
if (req == null) {
// no records matched, or the decryption failed. bah
if (_log.shouldLog(Log.WARN))
_log.warn("The request " + state.msg.getUniqueId() + " could not be decrypted");
return;
return -1;
}
long beforeLookup = System.currentTimeMillis();
Hash nextPeer = req.readNextIdentity();
long readPeerTime = System.currentTimeMillis()-beforeLookup;
RouterInfo nextPeerInfo = _context.netDb().lookupRouterInfoLocally(nextPeer);
long lookupTime = System.currentTimeMillis()-beforeLookup;
if (lookupTime > 500)
_log.warn("Took too long to lookup the request: " + lookupTime + "/" + readPeerTime + " for message " + state.msg.getUniqueId() + " received " + (timeSinceReceived+decryptTime) + " ago");
if (nextPeerInfo == null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Request " + state.msg.getUniqueId() + "/" + req.readReceiveTunnelId() + "/" + req.readNextTunnelId()
+ " handled, looking for the next peer " + nextPeer.toBase64());
_context.netDb().lookupRouterInfo(nextPeer, new HandleReq(_context, state, req, nextPeer), new TimeoutReq(_context, state, req, nextPeer), NEXT_HOP_LOOKUP_TIMEOUT);
return -1;
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Request " + state.msg.getUniqueId() + " handled and we know the next peer " + nextPeer.toBase64());
long beforeHandle = System.currentTimeMillis();
handleReq(nextPeerInfo, state, req, nextPeer);
long handleTime = System.currentTimeMillis() - beforeHandle;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Request " + state.msg.getUniqueId() + " handled and we know the next peer "
+ nextPeer.toBase64() + " after " + handleTime
+ "/" + decryptTime + "/" + lookupTime + "/" + timeSinceReceived);
return handleTime;
}
}
@ -488,9 +510,9 @@ class BuildHandler {
if (state.msg.getRecord(j) == null) {
ourSlot = j;
state.msg.setRecord(j, new ByteArray(reply));
if (_log.shouldLog(Log.DEBUG))
_log.debug("Full reply record for slot " + ourSlot + "/" + ourId + "/" + nextId + "/" + req.readReplyMessageId()
+ ": " + Base64.encode(reply));
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("Full reply record for slot " + ourSlot + "/" + ourId + "/" + nextId + "/" + req.readReplyMessageId()
// + ": " + Base64.encode(reply));
break;
}
}
@ -579,7 +601,7 @@ class BuildHandler {
_log.debug("Receive tunnel build message " + reqId + " from "
+ (from != null ? from.calculateHash().toBase64() : fromHash != null ? fromHash.toBase64() : "tunnels")
+ ", waiting ids: " + ids + ", found matching tunnel? " + (cfg != null),
new Exception("source"));
null);//new Exception("source"));
if (cfg != null) {
BuildEndMessageState state = new BuildEndMessageState(cfg, receivedMessage, from, fromHash);
if (HANDLE_REPLIES_INLINE) {

View File

@ -92,7 +92,9 @@ class BuildRequestor {
return;
}
long beforeCreate = System.currentTimeMillis();
TunnelBuildMessage msg = createTunnelBuildMessage(ctx, pool, cfg, pairedTunnel, exec);
long createTime = System.currentTimeMillis()-beforeCreate;
if (msg == null) {
if (log.shouldLog(Log.ERROR))
log.error("Tunnel build failed, as we couldn't create the tunnel build message for " + cfg);
@ -102,9 +104,10 @@ class BuildRequestor {
cfg.setPairedTunnel(pairedTunnel);
long beforeDispatch = System.currentTimeMillis();
if (cfg.isInbound()) {
if (log.shouldLog(Log.DEBUG))
log.debug("Sending the tunnel build request out the tunnel " + pairedTunnel + " to "
log.debug("Sending the tunnel build request " + msg.getUniqueId() + " out the tunnel " + pairedTunnel + " to "
+ cfg.getPeer(0).toBase64() + " for " + cfg + " waiting for the reply of "
+ cfg.getReplyMessageId());
// send it out a tunnel targeting the first hop
@ -129,6 +132,9 @@ class BuildRequestor {
outMsg.setTarget(peer);
ctx.outNetMessagePool().add(outMsg);
}
if (log.shouldLog(Log.DEBUG))
log.debug("Tunnel build message " + msg.getUniqueId() + " created in " + createTime
+ "ms and dispatched in " + (System.currentTimeMillis()-beforeDispatch));
}
private static TunnelBuildMessage createTunnelBuildMessage(RouterContext ctx, TunnelPool pool, PooledTunnelCreatorConfig cfg, TunnelInfo pairedTunnel, BuildExecutor exec) {

View File

@ -510,19 +510,22 @@ public class TunnelPoolManager implements TunnelManagerFacade {
processedOut += info.getProcessedMessagesCount();
}
out.write("</table>\n");
List pending = in.listPending();
for (int i = 0; i < pending.size(); i++) {
TunnelInfo info = (TunnelInfo)pending.get(i);
out.write("In progress: <code>" + info.toString() + "</code><br />\n");
if (in != null) {
List pending = in.listPending();
for (int i = 0; i < pending.size(); i++) {
TunnelInfo info = (TunnelInfo)pending.get(i);
out.write("In progress: <code>" + info.toString() + "</code><br />\n");
}
live += pending.size();
}
live += pending.size();
pending = outPool.listPending();
for (int i = 0; i < pending.size(); i++) {
TunnelInfo info = (TunnelInfo)pending.get(i);
out.write("In progress: <code>" + info.toString() + "</code><br />\n");
if (outPool != null) {
List pending = outPool.listPending();
for (int i = 0; i < pending.size(); i++) {
TunnelInfo info = (TunnelInfo)pending.get(i);
out.write("In progress: <code>" + info.toString() + "</code><br />\n");
}
live += pending.size();
}
live += pending.size();
if (live <= 0)
out.write("<b>No tunnels, waiting for the grace period to end</b><br />\n");
out.write("Lifetime bandwidth usage: " + processedIn + "KB in, " + processedOut + "KB out<br />");