forked from I2P_Developers/i2p.i2p
propagate from branch 'i2p.i2p.zzz.test4' (head 592b7d2b980e8cba19167fa064f25251296ed8bb)
to branch 'i2p.i2p' (head 0ba672eaca7076092389d2277dba231fdd34423b)
This commit is contained in:
@ -43,13 +43,13 @@ import java.io.OutputStream;
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.StringTokenizer;
|
||||
import java.util.concurrent.CopyOnWriteArraySet;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.I2PException;
|
||||
@ -99,7 +99,7 @@ public class I2PTunnel implements Logging, EventDispatcher {
|
||||
private final List tasks = new ArrayList();
|
||||
private int next_task_id = 1;
|
||||
|
||||
private final Set listeners = new HashSet();
|
||||
private final Set listeners = new CopyOnWriteArraySet();
|
||||
|
||||
public static void main(String[] args) throws IOException {
|
||||
new I2PTunnel(args);
|
||||
@ -118,8 +118,8 @@ public class I2PTunnel implements Logging, EventDispatcher {
|
||||
_tunnelId = ++__tunnelId;
|
||||
_log = _context.logManager().getLog(I2PTunnel.class);
|
||||
_event = new EventDispatcherImpl();
|
||||
Properties p = new Properties();
|
||||
p.putAll(System.getProperties());
|
||||
// as of 0.8.4, include context properties
|
||||
Properties p = _context.getProperties();
|
||||
_clientOptions = p;
|
||||
_sessions = new ArrayList(1);
|
||||
|
||||
@ -1626,16 +1626,12 @@ public class I2PTunnel implements Logging, EventDispatcher {
|
||||
|
||||
public void addConnectionEventListener(ConnectionEventListener lsnr) {
|
||||
if (lsnr == null) return;
|
||||
synchronized (listeners) {
|
||||
listeners.add(lsnr);
|
||||
}
|
||||
listeners.add(lsnr);
|
||||
}
|
||||
|
||||
public void removeConnectionEventListener(ConnectionEventListener lsnr) {
|
||||
if (lsnr == null) return;
|
||||
synchronized (listeners) {
|
||||
listeners.remove(lsnr);
|
||||
}
|
||||
listeners.remove(lsnr);
|
||||
}
|
||||
|
||||
private String getPrefix() { return "[" + _tunnelId + "]: "; }
|
||||
@ -1649,12 +1645,10 @@ public class I2PTunnel implements Logging, EventDispatcher {
|
||||
*/
|
||||
void routerDisconnected() {
|
||||
_log.error(getPrefix() + "Router disconnected - firing notification events");
|
||||
synchronized (listeners) {
|
||||
for (Iterator iter = listeners.iterator(); iter.hasNext();) {
|
||||
ConnectionEventListener lsnr = (ConnectionEventListener) iter.next();
|
||||
if (lsnr != null) lsnr.routerDisconnected();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -310,7 +310,7 @@ public class TunnelControllerGroup {
|
||||
synchronized (_sessions) {
|
||||
Set<TunnelController> owners = _sessions.get(session);
|
||||
if (owners == null) {
|
||||
owners = new HashSet(1);
|
||||
owners = new HashSet(2);
|
||||
_sessions.put(session, owners);
|
||||
}
|
||||
owners.add(controller);
|
||||
|
@ -4,6 +4,7 @@ package net.i2p.client.streaming;
|
||||
* Like a StringBuffer, but for bytes. This class is not internally synchronized,
|
||||
* so care should be taken when using in a multithreaded environment.
|
||||
*
|
||||
* @deprecated Only used by deprecated I2PSocketImpl
|
||||
*/
|
||||
class ByteCollector {
|
||||
byte[] contents;
|
||||
@ -294,4 +295,4 @@ class ByteCollector {
|
||||
size = 0;
|
||||
return bb;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -4,9 +4,12 @@
|
||||
*/
|
||||
package net.i2p.client.streaming;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InterruptedIOException;
|
||||
import java.net.ConnectException;
|
||||
import java.net.NoRouteToHostException;
|
||||
import java.net.ServerSocket;
|
||||
import java.net.Socket;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
|
||||
@ -84,7 +87,7 @@ public interface I2PSocketManager {
|
||||
*
|
||||
* @return a set of currently connected I2PSockets
|
||||
*/
|
||||
public Set listSockets();
|
||||
public Set<I2PSocket> listSockets();
|
||||
|
||||
/**
|
||||
* Ping the specified peer, returning true if they replied to the ping within
|
||||
@ -107,4 +110,25 @@ public interface I2PSocketManager {
|
||||
public static interface DisconnectListener {
|
||||
public void sessionDisconnected();
|
||||
}
|
||||
|
||||
/**
|
||||
* Like getServerSocket but returns a real ServerSocket for easier porting of apps.
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public ServerSocket getStandardServerSocket() throws IOException;
|
||||
|
||||
/**
|
||||
* Like connect() but returns a real Socket, and throws only IOE,
|
||||
* for easier porting of apps.
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public Socket connectToSocket(Destination peer) throws IOException;
|
||||
|
||||
/**
|
||||
* Like connect() but returns a real Socket, and throws only IOE,
|
||||
* for easier porting of apps.
|
||||
* @param timeout ms if > 0, forces blocking (disables connectDelay)
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public Socket connectToSocket(Destination peer, int timeout) throws IOException;
|
||||
}
|
||||
|
@ -10,6 +10,8 @@ import java.io.InterruptedIOException;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.net.ConnectException;
|
||||
import java.net.NoRouteToHostException;
|
||||
import java.net.ServerSocket;
|
||||
import java.net.Socket;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
@ -461,6 +463,14 @@ class I2PSocketManagerImpl implements I2PSocketManager, I2PSessionListener {
|
||||
return _serverSocket;
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws UnsupportedOperationException
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public ServerSocket getStandardServerSocket() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new connected socket (block until the socket is created)
|
||||
*
|
||||
@ -601,6 +611,22 @@ class I2PSocketManagerImpl implements I2PSocketManager, I2PSessionListener {
|
||||
return connect(peer, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws UnsupportedOperationException
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public Socket connectToSocket(Destination peer) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws UnsupportedOperationException
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public Socket connectToSocket(Destination peer, int timeout) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* Destroy the socket manager, freeing all the associated resources. This
|
||||
* method will block untill all the managed sockets are closed.
|
||||
@ -660,7 +686,7 @@ class I2PSocketManagerImpl implements I2PSocketManager, I2PSessionListener {
|
||||
* Retrieve a set of currently connected I2PSockets, either initiated locally or remotely.
|
||||
*
|
||||
*/
|
||||
public Set listSockets() {
|
||||
public Set<I2PSocket> listSockets() {
|
||||
Set<I2PSocket> sockets = new HashSet<I2PSocket>(8);
|
||||
synchronized (lock) {
|
||||
sockets.addAll(_inSockets.values());
|
||||
|
@ -100,7 +100,7 @@ public class ConfigNetHelper extends HelperBase {
|
||||
}
|
||||
|
||||
public String getTcpAutoIPChecked(int mode) {
|
||||
boolean enabled = TransportManager.enableNTCP(_context);
|
||||
boolean enabled = TransportManager.isNTCPEnabled(_context);
|
||||
String hostname = _context.getProperty(PROP_I2NP_NTCP_HOSTNAME);
|
||||
boolean specified = hostname != null && hostname.length() > 0;
|
||||
String auto = _context.getProperty(PROP_I2NP_NTCP_AUTO_IP, "false");
|
||||
|
@ -25,6 +25,7 @@ import org.mortbay.http.DigestAuthenticator;
|
||||
import org.mortbay.http.HashUserRealm;
|
||||
import org.mortbay.http.NCSARequestLog;
|
||||
import org.mortbay.http.SecurityConstraint;
|
||||
import org.mortbay.http.SocketListener;
|
||||
import org.mortbay.http.SslListener;
|
||||
import org.mortbay.http.handler.SecurityHandler;
|
||||
import org.mortbay.jetty.Server;
|
||||
@ -185,11 +186,21 @@ public class RouterConsoleRunner {
|
||||
while (tok.hasMoreTokens()) {
|
||||
String host = tok.nextToken().trim();
|
||||
try {
|
||||
if (host.indexOf(":") >= 0) // IPV6 - requires patched Jetty 5
|
||||
_server.addListener('[' + host + "]:" + _listenPort);
|
||||
else
|
||||
_server.addListener(host + ':' + _listenPort);
|
||||
//if (host.indexOf(":") >= 0) // IPV6 - requires patched Jetty 5
|
||||
// _server.addListener('[' + host + "]:" + _listenPort);
|
||||
//else
|
||||
// _server.addListener(host + ':' + _listenPort);
|
||||
Integer lport = Integer.parseInt(_listenPort);
|
||||
InetAddrPort iap = new InetAddrPort(host, lport);
|
||||
SocketListener lsnr = new SocketListener(iap);
|
||||
lsnr.setMinThreads(1); // default 2
|
||||
lsnr.setMaxThreads(24); // default 256
|
||||
lsnr.setMaxIdleTimeMs(90*1000); // default 10 sec
|
||||
lsnr.setName("ConsoleSocket"); // all with same name will use the same thread pool
|
||||
_server.addListener(lsnr);
|
||||
boundAddresses++;
|
||||
} catch (NumberFormatException nfe) {
|
||||
System.err.println("Unable to bind routerconsole to " + host + " port " + _listenPort + ' ' + nfe);
|
||||
} catch (IOException ioe) { // this doesn't seem to work, exceptions don't happen until start() below
|
||||
System.err.println("Unable to bind routerconsole to " + host + " port " + _listenPort + ' ' + ioe);
|
||||
}
|
||||
@ -221,6 +232,10 @@ public class RouterConsoleRunner {
|
||||
ssll.setPassword(ctx.getProperty(PROP_KEYSTORE_PASSWORD, DEFAULT_KEYSTORE_PASSWORD));
|
||||
// the X.509 cert password (if not present, verifyKeyStore() returned false)
|
||||
ssll.setKeyPassword(ctx.getProperty(PROP_KEY_PASSWORD, "thisWontWork"));
|
||||
ssll.setMinThreads(1); // default 2
|
||||
ssll.setMaxThreads(24); // default 256
|
||||
ssll.setMaxIdleTimeMs(90*1000); // default 10 sec
|
||||
ssll.setName("ConsoleSocket"); // all with same name will use the same thread pool
|
||||
_server.addListener(ssll);
|
||||
boundAddresses++;
|
||||
} catch (Exception e) { // probably no exceptions at this point
|
||||
|
@ -40,7 +40,7 @@ class ConnectionManager {
|
||||
private int _maxConcurrentStreams;
|
||||
private ConnectionOptions _defaultOptions;
|
||||
private volatile int _numWaiting;
|
||||
private long SoTimeout;
|
||||
private long _soTimeout;
|
||||
private ConnThrottler _minuteThrottler;
|
||||
private ConnThrottler _hourThrottler;
|
||||
private ConnThrottler _dayThrottler;
|
||||
@ -64,7 +64,7 @@ class ConnectionManager {
|
||||
_allowIncoming = false;
|
||||
_numWaiting = 0;
|
||||
/** Socket timeout for accept() */
|
||||
SoTimeout = -1;
|
||||
_soTimeout = -1;
|
||||
|
||||
_context.statManager().createRateStat("stream.con.lifetimeMessagesSent", "How many messages do we send on a stream?", "Stream", new long[] { 60*60*1000, 24*60*60*1000 });
|
||||
_context.statManager().createRateStat("stream.con.lifetimeMessagesReceived", "How many messages do we receive on a stream?", "Stream", new long[] { 60*60*1000, 24*60*60*1000 });
|
||||
@ -97,16 +97,16 @@ class ConnectionManager {
|
||||
* Set the socket accept() timeout.
|
||||
* @param x
|
||||
*/
|
||||
public void MsetSoTimeout(long x) {
|
||||
SoTimeout = x;
|
||||
public void setSoTimeout(long x) {
|
||||
_soTimeout = x;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the socket accept() timeout.
|
||||
* @return accept timeout in ms.
|
||||
*/
|
||||
public long MgetSoTimeout() {
|
||||
return SoTimeout;
|
||||
public long getSoTimeout() {
|
||||
return _soTimeout;
|
||||
}
|
||||
|
||||
public void setAllowIncomingConnections(boolean allow) {
|
||||
|
@ -26,11 +26,11 @@ class I2PServerSocketFull implements I2PServerSocket {
|
||||
}
|
||||
|
||||
public long getSoTimeout() {
|
||||
return _socketManager.getConnectionManager().MgetSoTimeout();
|
||||
return _socketManager.getConnectionManager().getSoTimeout();
|
||||
}
|
||||
|
||||
public void setSoTimeout(long x) {
|
||||
_socketManager.getConnectionManager().MsetSoTimeout(x);
|
||||
_socketManager.getConnectionManager().setSoTimeout(x);
|
||||
}
|
||||
/**
|
||||
* Close the connection.
|
||||
|
@ -46,6 +46,10 @@ class I2PSocketFull implements I2PSocket {
|
||||
|
||||
Connection getConnection() { return _connection; }
|
||||
|
||||
/**
|
||||
* Warning, may return null instead of throwing IOE,
|
||||
* which is not what the interface says.
|
||||
*/
|
||||
public InputStream getInputStream() {
|
||||
Connection c = _connection;
|
||||
if (c != null)
|
||||
@ -62,6 +66,10 @@ class I2PSocketFull implements I2PSocket {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Warning, may return null instead of throwing IOE,
|
||||
* which is not what the interface says.
|
||||
*/
|
||||
public OutputStream getOutputStream() throws IOException {
|
||||
Connection c = _connection;
|
||||
if (c != null)
|
||||
|
@ -1,6 +1,9 @@
|
||||
package net.i2p.client.streaming;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.NoRouteToHostException;
|
||||
import java.net.ServerSocket;
|
||||
import java.net.Socket;
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
@ -30,6 +33,7 @@ public class I2PSocketManagerFull implements I2PSocketManager {
|
||||
private Log _log;
|
||||
private I2PSession _session;
|
||||
private I2PServerSocketFull _serverSocket;
|
||||
private StandardServerSocket _realServerSocket;
|
||||
private ConnectionOptions _defaultOptions;
|
||||
private long _acceptTimeout;
|
||||
private String _name;
|
||||
@ -44,8 +48,6 @@ public class I2PSocketManagerFull implements I2PSocketManager {
|
||||
private static final long ACCEPT_TIMEOUT_DEFAULT = 5*1000;
|
||||
|
||||
public I2PSocketManagerFull() {
|
||||
_context = null;
|
||||
_session = null;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -120,7 +122,7 @@ public class I2PSocketManagerFull implements I2PSocketManager {
|
||||
*/
|
||||
public I2PSocket receiveSocket() throws I2PException, SocketTimeoutException {
|
||||
verifySession();
|
||||
Connection con = _connectionManager.getConnectionHandler().accept(_connectionManager.MgetSoTimeout());
|
||||
Connection con = _connectionManager.getConnectionHandler().accept(_connectionManager.getSoTimeout());
|
||||
if(_log.shouldLog(Log.DEBUG)) {
|
||||
_log.debug("receiveSocket() called: " + con);
|
||||
}
|
||||
@ -129,7 +131,7 @@ public class I2PSocketManagerFull implements I2PSocketManager {
|
||||
con.setSocket(sock);
|
||||
return sock;
|
||||
} else {
|
||||
if(_connectionManager.MgetSoTimeout() == -1) {
|
||||
if(_connectionManager.getSoTimeout() == -1) {
|
||||
return null;
|
||||
}
|
||||
throw new SocketTimeoutException("I2PSocket timed out");
|
||||
@ -171,6 +173,17 @@ public class I2PSocketManagerFull implements I2PSocketManager {
|
||||
return _serverSocket;
|
||||
}
|
||||
|
||||
/**
|
||||
* Like getServerSocket but returns a real ServerSocket for easier porting of apps.
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public synchronized ServerSocket getStandardServerSocket() throws IOException {
|
||||
if (_realServerSocket == null)
|
||||
_realServerSocket = new StandardServerSocket(_serverSocket);
|
||||
_connectionManager.setAllowIncomingConnections(true);
|
||||
return _realServerSocket;
|
||||
}
|
||||
|
||||
private void verifySession() throws I2PException {
|
||||
if (!_connectionManager.getSession().isClosed())
|
||||
return;
|
||||
@ -185,7 +198,7 @@ public class I2PSocketManagerFull implements I2PSocketManager {
|
||||
* this data will be bundled in the SYN packet.
|
||||
*
|
||||
* @param peer Destination to connect to
|
||||
* @param options I2P socket options to be used for connecting
|
||||
* @param options I2P socket options to be used for connecting, may be null
|
||||
*
|
||||
* @return I2PSocket if successful
|
||||
* @throws NoRouteToHostException if the peer is not found or not reachable
|
||||
@ -235,6 +248,45 @@ public class I2PSocketManagerFull implements I2PSocketManager {
|
||||
return connect(peer, _defaultOptions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Like connect() but returns a real Socket, and throws only IOE,
|
||||
* for easier porting of apps.
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public Socket connectToSocket(Destination peer) throws IOException {
|
||||
return connectToSocket(peer, _defaultOptions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Like connect() but returns a real Socket, and throws only IOE,
|
||||
* for easier porting of apps.
|
||||
* @param timeout ms if > 0, forces blocking (disables connectDelay)
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public Socket connectToSocket(Destination peer, int timeout) throws IOException {
|
||||
ConnectionOptions opts = new ConnectionOptions(_defaultOptions);
|
||||
opts.setConnectTimeout(timeout);
|
||||
if (timeout > 0)
|
||||
opts.setConnectDelay(-1);
|
||||
return connectToSocket(peer, opts);
|
||||
}
|
||||
|
||||
/**
|
||||
* Like connect() but returns a real Socket, and throws only IOE,
|
||||
* for easier porting of apps.
|
||||
* @param options may be null
|
||||
* @since 0.8.4
|
||||
*/
|
||||
private Socket connectToSocket(Destination peer, I2PSocketOptions options) throws IOException {
|
||||
try {
|
||||
I2PSocket sock = connect(peer, options);
|
||||
return new StandardSocket(sock);
|
||||
} catch (I2PException i2pe) {
|
||||
// fixme in 1.6 change to cause
|
||||
throw new IOException(i2pe.toString());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Destroy the socket manager, freeing all the associated resources. This
|
||||
* method will block untill all the managed sockets are closed.
|
||||
@ -259,11 +311,10 @@ public class I2PSocketManagerFull implements I2PSocketManager {
|
||||
*
|
||||
* @return set of currently connected I2PSockets
|
||||
*/
|
||||
public Set listSockets() {
|
||||
Set connections = _connectionManager.listConnections();
|
||||
Set rv = new HashSet(connections.size());
|
||||
for (Iterator iter = connections.iterator(); iter.hasNext(); ) {
|
||||
Connection con = (Connection)iter.next();
|
||||
public Set<I2PSocket> listSockets() {
|
||||
Set<Connection> connections = _connectionManager.listConnections();
|
||||
Set<I2PSocket> rv = new HashSet(connections.size());
|
||||
for (Connection con : connections) {
|
||||
if (con.getSocket() != null)
|
||||
rv.add(con.getSocket());
|
||||
}
|
||||
|
@ -2,13 +2,13 @@ package net.i2p.client.streaming;
|
||||
|
||||
import java.util.Iterator;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CopyOnWriteArraySet;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.client.I2PSession;
|
||||
import net.i2p.client.I2PSessionException;
|
||||
import net.i2p.client.I2PSessionListener;
|
||||
import net.i2p.util.Log;
|
||||
import net.i2p.util.ConcurrentHashSet;
|
||||
|
||||
/**
|
||||
* Receive raw information from the I2PSession and turn it into
|
||||
@ -24,7 +24,7 @@ class MessageHandler implements I2PSessionListener {
|
||||
public MessageHandler(I2PAppContext ctx, ConnectionManager mgr) {
|
||||
_manager = mgr;
|
||||
_context = ctx;
|
||||
_listeners = new ConcurrentHashSet(1);
|
||||
_listeners = new CopyOnWriteArraySet();
|
||||
_log = ctx.logManager().getLog(MessageHandler.class);
|
||||
_context.statManager().createRateStat("stream.packetReceiveFailure", "When do we fail to decrypt or otherwise receive a packet sent to us?", "Stream", new long[] { 60*60*1000, 24*60*60*1000 });
|
||||
}
|
||||
|
@ -0,0 +1,170 @@
|
||||
package net.i2p.client.streaming;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetAddress;
|
||||
import java.net.ServerSocket;
|
||||
import java.net.Socket;
|
||||
import java.net.SocketAddress;
|
||||
import java.net.SocketException;
|
||||
import java.nio.channels.ServerSocketChannel;
|
||||
|
||||
import net.i2p.I2PException;
|
||||
|
||||
/**
|
||||
* Bridge to I2PServerSocket.
|
||||
*
|
||||
* This extends ServerSocket to make porting apps easier.
|
||||
* accept() returns a real Socket (a StandardSocket).
|
||||
* accept() throws IOExceptions like ServerSockets do, rather than returning
|
||||
* null or throwing I2PExceptions.
|
||||
*
|
||||
* StandardServerSockets are always bound.
|
||||
* You may not create an unbound StandardServerSocket.
|
||||
* Create this through the SocketManager.
|
||||
*
|
||||
* @author zzz
|
||||
* @since 0.8.4
|
||||
*/
|
||||
class StandardServerSocket extends ServerSocket {
|
||||
private final I2PServerSocketFull _socket;
|
||||
|
||||
/**
|
||||
* Doesn't really throw IOE but super() does
|
||||
*/
|
||||
StandardServerSocket(I2PServerSocketFull socket) throws IOException {
|
||||
_socket = socket;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Socket accept() throws IOException {
|
||||
try {
|
||||
I2PSocket sock = _socket.accept();
|
||||
if (sock == null)
|
||||
throw new IOException("No socket");
|
||||
return new StandardSocket(sock);
|
||||
} catch (I2PException i2pe) {
|
||||
// fixme in 1.6 change to cause
|
||||
throw new IOException(i2pe.toString());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws UnsupportedOperationException always
|
||||
*/
|
||||
@Override
|
||||
public void bind(SocketAddress endpoint) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws UnsupportedOperationException always
|
||||
*/
|
||||
@Override
|
||||
public void bind(SocketAddress endpoint, int backlog) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
if (isClosed())
|
||||
throw new IOException("Already closed");
|
||||
_socket.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return null always
|
||||
*/
|
||||
@Override
|
||||
public ServerSocketChannel getChannel() {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return null always
|
||||
*/
|
||||
@Override
|
||||
public InetAddress getInetAddress() {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return -1 always
|
||||
*/
|
||||
@Override
|
||||
public int getLocalPort() {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return null always
|
||||
*/
|
||||
@Override
|
||||
public SocketAddress getLocalSocketAddress() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getReceiveBufferSize() {
|
||||
ConnectionOptions opts = (ConnectionOptions) ((I2PSocketManagerFull)_socket.getManager()).getDefaultOptions();
|
||||
if (opts == null)
|
||||
return 64*1024;
|
||||
return opts.getInboundBufferSize();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return false always
|
||||
*/
|
||||
@Override
|
||||
public boolean getReuseAddress() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getSoTimeout() {
|
||||
return (int) _socket.getSoTimeout();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true always
|
||||
*/
|
||||
@Override
|
||||
public boolean isBound() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isClosed() {
|
||||
return ((I2PSocketManagerFull)_socket.getManager()).getConnectionManager().getAllowIncomingConnections();
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing.
|
||||
*/
|
||||
@Override
|
||||
public void setPerformancePreferences(int connectionTime, int latency, int bandwidth) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing.
|
||||
*/
|
||||
@Override
|
||||
public void setReceiveBufferSize(int size) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing.
|
||||
*/
|
||||
@Override
|
||||
public void setReuseAddress(boolean on) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setSoTimeout(int timeout) throws SocketException {
|
||||
_socket.setSoTimeout(timeout);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return _socket.toString();
|
||||
}
|
||||
}
|
@ -0,0 +1,341 @@
|
||||
package net.i2p.client.streaming;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.net.InetAddress;
|
||||
import java.net.Socket;
|
||||
import java.net.SocketAddress;
|
||||
import java.net.SocketException;
|
||||
import java.nio.channels.SocketChannel;
|
||||
|
||||
import net.i2p.I2PException;
|
||||
|
||||
/**
|
||||
* Bridge to I2PSocket.
|
||||
*
|
||||
* This extends Socket to make porting apps easier.
|
||||
* Methods throw IOExceptions like Sockets do, rather than returning
|
||||
* null for some methods.
|
||||
*
|
||||
* StandardSockets are always bound, and always start out connected
|
||||
* (unless connectDelay is > 0).
|
||||
* You may not create an unbound StandardSocket.
|
||||
* Create this through the SocketManager.
|
||||
*
|
||||
* Todo: Make public and add getPeerDestination() ?
|
||||
*
|
||||
* @author zzz
|
||||
* @since 0.8.4
|
||||
*/
|
||||
class StandardSocket extends Socket {
|
||||
private final I2PSocket _socket;
|
||||
|
||||
StandardSocket(I2PSocket socket) {
|
||||
_socket = socket;
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws UnsupportedOperationException always
|
||||
*/
|
||||
@Override
|
||||
public void bind(SocketAddress bindpoint) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
if (_socket.isClosed())
|
||||
throw new IOException("Already closed");
|
||||
_socket.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws UnsupportedOperationException always
|
||||
*/
|
||||
@Override
|
||||
public void connect(SocketAddress endpoint) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws UnsupportedOperationException always
|
||||
*/
|
||||
@Override
|
||||
public void connect(SocketAddress endpoint, int timeout) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return null always
|
||||
*/
|
||||
@Override
|
||||
public SocketChannel getChannel() {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return null always
|
||||
*/
|
||||
@Override
|
||||
public InetAddress getInetAddress() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public InputStream getInputStream() throws IOException {
|
||||
InputStream rv = _socket.getInputStream();
|
||||
if (rv != null)
|
||||
return rv;
|
||||
throw new IOException("No stream");
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean getKeepAlive() {
|
||||
ConnectionOptions opts = (ConnectionOptions) _socket.getOptions();
|
||||
if (opts == null)
|
||||
return false;
|
||||
return opts.getInactivityAction() == ConnectionOptions.INACTIVITY_ACTION_SEND;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return null always
|
||||
*/
|
||||
@Override
|
||||
public InetAddress getLocalAddress() {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return -1 always
|
||||
*/
|
||||
@Override
|
||||
public int getLocalPort() {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return null always
|
||||
*/
|
||||
@Override
|
||||
public SocketAddress getLocalSocketAddress() {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return false always
|
||||
*/
|
||||
@Override
|
||||
public boolean getOOBInline() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public OutputStream getOutputStream() throws IOException {
|
||||
OutputStream rv = _socket.getOutputStream();
|
||||
if (rv != null)
|
||||
return rv;
|
||||
throw new IOException("No stream");
|
||||
}
|
||||
|
||||
/**
|
||||
* @return 0 always
|
||||
*/
|
||||
@Override
|
||||
public int getPort() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getReceiveBufferSize() {
|
||||
ConnectionOptions opts = (ConnectionOptions) _socket.getOptions();
|
||||
if (opts == null)
|
||||
return 64*1024;
|
||||
return opts.getInboundBufferSize();
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws UnsupportedOperationException always
|
||||
*/
|
||||
@Override
|
||||
public SocketAddress getRemoteSocketAddress() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return false always
|
||||
*/
|
||||
@Override
|
||||
public boolean getReuseAddress() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getSendBufferSize() {
|
||||
ConnectionOptions opts = (ConnectionOptions) _socket.getOptions();
|
||||
if (opts == null)
|
||||
return 64*1024;
|
||||
return opts.getInboundBufferSize();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getSoLinger() {
|
||||
I2PSocketOptions opts = _socket.getOptions();
|
||||
if (opts == null)
|
||||
return -1;
|
||||
return -1; // fixme really?
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getSoTimeout() {
|
||||
I2PSocketOptions opts = _socket.getOptions();
|
||||
if (opts == null)
|
||||
return 0;
|
||||
return (int) opts.getReadTimeout();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return false always
|
||||
*/
|
||||
@Override
|
||||
public boolean getTcpNoDelay() {
|
||||
// No option yet. See ConnectionDataReceiver
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return 0 always
|
||||
*/
|
||||
@Override
|
||||
public int getTrafficClass() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true always
|
||||
*/
|
||||
@Override
|
||||
public boolean isBound() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isClosed() {
|
||||
return _socket.isClosed();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isConnected() {
|
||||
return !_socket.isClosed();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isInputShutdown() {
|
||||
return _socket.isClosed();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isOutputShutdown() {
|
||||
return _socket.isClosed();
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws UnsupportedOperationException always
|
||||
*/
|
||||
@Override
|
||||
public void sendUrgentData(int data) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setKeepAlive(boolean on) {
|
||||
ConnectionOptions opts = (ConnectionOptions) _socket.getOptions();
|
||||
if (opts == null)
|
||||
return;
|
||||
if (on)
|
||||
opts.setInactivityAction(ConnectionOptions.INACTIVITY_ACTION_SEND);
|
||||
else
|
||||
opts.setInactivityAction(ConnectionOptions.INACTIVITY_ACTION_NOOP); // DISCONNECT?
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws UnsupportedOperationException if on is true
|
||||
*/
|
||||
@Override
|
||||
public void setOOBInline(boolean on) {
|
||||
if (on)
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing.
|
||||
*/
|
||||
@Override
|
||||
public void setPerformancePreferences(int connectionTime, int latency, int bandwidth) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing.
|
||||
*/
|
||||
@Override
|
||||
public void setReceiveBufferSize(int size) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing.
|
||||
*/
|
||||
@Override
|
||||
public void setReuseAddress(boolean on) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing.
|
||||
*/
|
||||
@Override
|
||||
public void setSendBufferSize(int size) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing.
|
||||
*/
|
||||
@Override
|
||||
public void setSoLinger(boolean on, int linger) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setSoTimeout(int timeout) throws SocketException {
|
||||
I2PSocketOptions opts = _socket.getOptions();
|
||||
if (opts == null)
|
||||
throw new SocketException("No options");
|
||||
opts.setReadTimeout(timeout);
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing.
|
||||
*/
|
||||
@Override
|
||||
public void setTcpNoDelay(boolean on) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing.
|
||||
*/
|
||||
@Override
|
||||
public void setTrafficClass(int tc) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdownInput() throws IOException {
|
||||
close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdownOutput() throws IOException {
|
||||
close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return _socket.toString();
|
||||
}
|
||||
}
|
@ -65,7 +65,7 @@ import net.i2p.util.I2PProperties.I2PPropertyCallback;
|
||||
*/
|
||||
public class I2PAppContext {
|
||||
/** the context that components without explicit root are bound */
|
||||
protected static I2PAppContext _globalAppContext;
|
||||
protected static volatile I2PAppContext _globalAppContext;
|
||||
|
||||
protected I2PProperties _overrideProps;
|
||||
|
||||
@ -119,7 +119,8 @@ public class I2PAppContext {
|
||||
*
|
||||
*/
|
||||
public static I2PAppContext getGlobalContext() {
|
||||
// skip the global lock
|
||||
// skip the global lock - _gAC must be volatile
|
||||
// http://www.cs.umd.edu/~pugh/java/memoryModel/DoubleCheckedLocking.html
|
||||
I2PAppContext rv = _globalAppContext;
|
||||
if (rv != null)
|
||||
return rv;
|
||||
@ -476,6 +477,9 @@ public class I2PAppContext {
|
||||
* provided during the context construction, as well as the ones included in
|
||||
* System.getProperties.
|
||||
*
|
||||
* WARNING - not overridden in RouterContext, doesn't contain router config settings,
|
||||
* use getProperties() instead.
|
||||
*
|
||||
* @return set of Strings containing the names of defined system properties
|
||||
*/
|
||||
public Set getPropertyNames() {
|
||||
@ -485,6 +489,21 @@ public class I2PAppContext {
|
||||
return names;
|
||||
}
|
||||
|
||||
/**
|
||||
* Access the configuration attributes of this context, listing the properties
|
||||
* provided during the context construction, as well as the ones included in
|
||||
* System.getProperties.
|
||||
*
|
||||
* @return new Properties with system and context properties
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public Properties getProperties() {
|
||||
Properties rv = new Properties();
|
||||
rv.putAll(System.getProperties());
|
||||
rv.putAll(_overrideProps);
|
||||
return rv;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a callback, which will fire upon changes in the property
|
||||
* given in the specific callback.
|
||||
@ -767,7 +786,7 @@ public class I2PAppContext {
|
||||
* enable simulators to play with clock skew among different instances.
|
||||
*
|
||||
*/
|
||||
public Clock clock() { // overridden in RouterContext
|
||||
public Clock clock() {
|
||||
if (!_clockInitialized)
|
||||
initializeClock();
|
||||
return _clock;
|
||||
|
@ -12,6 +12,8 @@ package net.i2p.client;
|
||||
import java.util.Date;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
import net.i2p.I2PAppContext;
|
||||
import net.i2p.data.DataFormatException;
|
||||
@ -41,22 +43,51 @@ import net.i2p.util.Log;
|
||||
* @author jrandom
|
||||
*/
|
||||
class I2CPMessageProducer {
|
||||
private final static Log _log = new Log(I2CPMessageProducer.class);
|
||||
private final Log _log;
|
||||
private final I2PAppContext _context;
|
||||
private int _sendBps;
|
||||
private long _sendPeriodBytes;
|
||||
private long _sendPeriodBeginTime;
|
||||
private int _maxBytesPerSecond;
|
||||
private volatile int _sendPeriodBytes;
|
||||
private volatile long _sendPeriodBeginTime;
|
||||
private final ReentrantLock _lock;
|
||||
private static final String PROP_MAX_BW = "i2cp.outboundBytesPerSecond";
|
||||
/** see ConnectionOptions in streaming - MTU + streaming overhead + gzip overhead */
|
||||
private static final int TYP_SIZE = 1730 + 28 + 23;
|
||||
private static final int MIN_RATE = 2 * TYP_SIZE;
|
||||
|
||||
public I2CPMessageProducer(I2PAppContext context) {
|
||||
_context = context;
|
||||
context.statManager().createRateStat("client.sendBpsRaw", "How fast we pump out I2CP data messages", "ClientMessages", new long[] { 60*1000, 5*60*1000, 10*60*1000, 60*60*1000 });
|
||||
_log = context.logManager().getLog(I2CPMessageProducer.class);
|
||||
_lock = new ReentrantLock(true);
|
||||
context.statManager().createRateStat("client.sendThrottled", "Times waited for bandwidth", "ClientMessages", new long[] { 60*1000 });
|
||||
context.statManager().createRateStat("client.sendDropped", "Length of msg dropped waiting for bandwidth", "ClientMessages", new long[] { 60*1000 });
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the bandwidth setting
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public void updateBandwidth(I2PSessionImpl session) {
|
||||
String max = session.getOptions().getProperty(PROP_MAX_BW);
|
||||
if (max != null) {
|
||||
try {
|
||||
int iMax = Integer.parseInt(max);
|
||||
if (iMax > 0)
|
||||
// round up to next higher TYP_SIZE for efficiency, then add some fudge for small messages
|
||||
_maxBytesPerSecond = 256 + Math.max(MIN_RATE, TYP_SIZE * ((iMax + TYP_SIZE - 1) / TYP_SIZE));
|
||||
else
|
||||
_maxBytesPerSecond = 0;
|
||||
} catch (NumberFormatException nfe) {}
|
||||
}
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Setting " + _maxBytesPerSecond + " BPS max");
|
||||
}
|
||||
|
||||
/**
|
||||
* Send all the messages that a client needs to send to a router to establish
|
||||
* a new session.
|
||||
*/
|
||||
public void connect(I2PSessionImpl session) throws I2PSessionException {
|
||||
updateBandwidth(session);
|
||||
CreateSessionMessage msg = new CreateSessionMessage();
|
||||
SessionConfig cfg = new SessionConfig(session.getMyDestination());
|
||||
cfg.setOptions(session.getOptions());
|
||||
@ -99,32 +130,135 @@ class I2CPMessageProducer {
|
||||
*/
|
||||
public void sendMessage(I2PSessionImpl session, Destination dest, long nonce, byte[] payload, SessionTag tag,
|
||||
SessionKey key, Set tags, SessionKey newKey, long expires) throws I2PSessionException {
|
||||
sendMessage(session, dest, nonce, payload, expires, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Package up and send the payload to the router for delivery
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public void sendMessage(I2PSessionImpl session, Destination dest, long nonce, byte[] payload,
|
||||
long expires, int flags) throws I2PSessionException {
|
||||
|
||||
if (!updateBps(payload.length, expires))
|
||||
// drop the message... send fail notification?
|
||||
return;
|
||||
SendMessageMessage msg;
|
||||
if (expires > 0) {
|
||||
msg = new SendMessageExpiresMessage();
|
||||
((SendMessageExpiresMessage)msg).setExpiration(new Date(expires));
|
||||
if (expires > 0 || flags > 0) {
|
||||
SendMessageExpiresMessage smsg = new SendMessageExpiresMessage();
|
||||
smsg.setExpiration(expires);
|
||||
smsg.setFlags(flags);
|
||||
msg = smsg;
|
||||
} else
|
||||
msg = new SendMessageMessage();
|
||||
msg.setDestination(dest);
|
||||
msg.setSessionId(session.getSessionId());
|
||||
msg.setNonce(nonce);
|
||||
Payload data = createPayload(dest, payload, tag, key, tags, newKey);
|
||||
Payload data = createPayload(dest, payload, null, null, null, null);
|
||||
msg.setPayload(data);
|
||||
session.sendMessage(msg);
|
||||
updateBps(payload.length);
|
||||
}
|
||||
|
||||
private void updateBps(int len) {
|
||||
long now = _context.clock().now();
|
||||
float period = ((float)now-_sendPeriodBeginTime)/1000f;
|
||||
if (period >= 1f) {
|
||||
// first term decays on slow transmission
|
||||
_sendBps = (int)(((float)0.9f * (float)_sendBps) + ((float)0.1f*((float)_sendPeriodBytes)/period));
|
||||
_sendPeriodBytes = len;
|
||||
_sendPeriodBeginTime = now;
|
||||
_context.statManager().addRateData("client.sendBpsRaw", _sendBps, 0);
|
||||
} else {
|
||||
_sendPeriodBytes += len;
|
||||
/**
|
||||
* Super-simple bandwidth throttler.
|
||||
* We only calculate on a one-second basis, so large messages
|
||||
* (compared to the one-second limit) may exceed the limits.
|
||||
* Tuned for streaming, may not work well for large datagrams.
|
||||
*
|
||||
* This does poorly with low rate limits since it doesn't credit
|
||||
* bandwidth across two periods. So the limit is rounded up,
|
||||
* and the min limit is set to 2x the typ size, above.
|
||||
*
|
||||
* Blocking so this could be very bad for retransmissions,
|
||||
* as it could clog StreamingTimer.
|
||||
* Waits are somewhat "fair" using ReentrantLock.
|
||||
* While out-of-order transmission is acceptable, fairness
|
||||
* reduces the chance of starvation. ReentrantLock does not
|
||||
* guarantee in-order execution due to thread priority issues,
|
||||
* so out-of-order may still occur. But shouldn't happen within
|
||||
* the same thread anyway... Also note that small messages may
|
||||
* go ahead of large ones that are waiting for the next window.
|
||||
* Also, threads waiting a second time go to the back of the line.
|
||||
*
|
||||
* Since this is at the I2CP layer, it includes streaming overhead,
|
||||
* streaming acks and retransmissions,
|
||||
* gzip overhead (or "underhead" for compression),
|
||||
* repliable datagram overhead, etc.
|
||||
* However, it does not, of course, include the substantial overhead
|
||||
* imposed by the router for the leaseset, tags, encryption,
|
||||
* and fixed-size tunnel messages.
|
||||
*
|
||||
* @param expires if > 0, an expiration date
|
||||
* @return true if we should send the message, false to drop it
|
||||
*/
|
||||
private boolean updateBps(int len, long expires) {
|
||||
if (_maxBytesPerSecond <= 0)
|
||||
return true;
|
||||
//synchronized(this) {
|
||||
_lock.lock();
|
||||
try {
|
||||
int waitCount = 0;
|
||||
while (true) {
|
||||
long now = _context.clock().now();
|
||||
if (waitCount > 0 && expires > 0 && expires < now) {
|
||||
// just say no to bufferbloat... drop the message right here
|
||||
_context.statManager().addRateData("client.sendDropped", len, 0);
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Dropping " + len + " byte msg expired in queue");
|
||||
return false;
|
||||
}
|
||||
|
||||
long period = now - _sendPeriodBeginTime;
|
||||
if (period >= 2000) {
|
||||
// start new period, always let it through no matter how big
|
||||
_sendPeriodBytes = len;
|
||||
_sendPeriodBeginTime = now;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("New period after idle, " + len + " bytes");
|
||||
return true;
|
||||
}
|
||||
|
||||
if (period >= 1000) {
|
||||
// start new period
|
||||
// Allow burst within 2 sec, only advance window by 1 sec, and
|
||||
// every other second give credit for unused bytes in previous period
|
||||
if (_sendPeriodBytes > 0 && ((_sendPeriodBeginTime / 1000) & 0x01) == 0)
|
||||
_sendPeriodBytes += len - _maxBytesPerSecond;
|
||||
else
|
||||
_sendPeriodBytes = len;
|
||||
_sendPeriodBeginTime += 1000;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("New period, " + len + " bytes");
|
||||
return true;
|
||||
}
|
||||
|
||||
if (_sendPeriodBytes + len <= _maxBytesPerSecond) {
|
||||
// still bytes available in this period
|
||||
_sendPeriodBytes += len;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Sending " + len + ", Elapsed " + period + "ms, total " + _sendPeriodBytes + " bytes");
|
||||
return true;
|
||||
}
|
||||
|
||||
if (waitCount >= 2) {
|
||||
// just say no to bufferbloat... drop the message right here
|
||||
_context.statManager().addRateData("client.sendDropped", len, 0);
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Dropping " + len + " byte msg after waiting " + waitCount + " times");
|
||||
return false;
|
||||
}
|
||||
|
||||
// wait until next period
|
||||
_context.statManager().addRateData("client.sendThrottled", ++waitCount, 0);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Throttled " + len + " bytes, wait #" + waitCount + ' ' + (1000 - period) + "ms" /*, new Exception()*/);
|
||||
try {
|
||||
//this.wait(1000 - period);
|
||||
_lock.newCondition().await(1000 - period, TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException ie) {}
|
||||
}
|
||||
} finally {
|
||||
_lock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -9,6 +9,7 @@ package net.i2p.client;
|
||||
*
|
||||
*/
|
||||
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
|
||||
import net.i2p.data.Destination;
|
||||
@ -20,17 +21,20 @@ import net.i2p.data.SigningPrivateKey;
|
||||
/**
|
||||
* <p>Define the standard means of sending and receiving messages on the
|
||||
* I2P network by using the I2CP (the client protocol). This is done over a
|
||||
* bidirectional TCP socket and never sends any private keys - all end to end
|
||||
* encryption is done transparently within the client's I2PSession
|
||||
* itself. Periodically the router will ask the client to authorize a new set of
|
||||
* bidirectional TCP socket and never sends any private keys.
|
||||
*
|
||||
* End to end encryption in I2PSession was disabled in release 0.6.
|
||||
*
|
||||
* Periodically the router will ask the client to authorize a new set of
|
||||
* tunnels to be allocated to the client, which the client can accept by sending a
|
||||
* {@link net.i2p.data.LeaseSet} signed by the {@link net.i2p.data.Destination}.
|
||||
* In addition, the router may on occation provide the client with an updated
|
||||
* In addition, the router may on occasion provide the client with an updated
|
||||
* clock offset so that the client can stay in sync with the network (even if
|
||||
* the host computer's clock is off).</p>
|
||||
*
|
||||
*/
|
||||
public interface I2PSession {
|
||||
|
||||
/** Send a new message to the given destination, containing the specified
|
||||
* payload, returning true if the router feels confident that the message
|
||||
* was delivered.
|
||||
@ -39,11 +43,18 @@ public interface I2PSession {
|
||||
* @return whether it was accepted by the router for delivery or not
|
||||
*/
|
||||
public boolean sendMessage(Destination dest, byte[] payload) throws I2PSessionException;
|
||||
|
||||
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size) throws I2PSessionException;
|
||||
/** See I2PSessionMuxedImpl for details */
|
||||
|
||||
/**
|
||||
* See I2PSessionMuxedImpl for proto/port details.
|
||||
* @since 0.7.1
|
||||
*/
|
||||
public boolean sendMessage(Destination dest, byte[] payload, int proto, int fromport, int toport) throws I2PSessionException;
|
||||
|
||||
/**
|
||||
* End-to-End Crypto is disabled, tags and keys are ignored!
|
||||
*
|
||||
* Like sendMessage above, except the key used and the tags sent are exposed to the
|
||||
* application. <p />
|
||||
*
|
||||
@ -61,25 +72,62 @@ public interface I2PSession {
|
||||
*
|
||||
* @param dest location to send the message
|
||||
* @param payload body of the message to be sent (unencrypted)
|
||||
* @param keyUsed session key delivered to the destination for association with the tags sent. This is essentially
|
||||
* @param keyUsed UNUSED, IGNORED. Session key delivered to the destination for association with the tags sent. This is essentially
|
||||
* an output parameter - keyUsed.getData() is ignored during this call, but after the call completes,
|
||||
* it will be filled with the bytes of the session key delivered. Typically the key delivered is the
|
||||
* same one as the key encrypted with, but not always. If this is null then the key data will not be
|
||||
* exposed.
|
||||
* @param tagsSent set of tags delivered to the peer and associated with the keyUsed. This is also an output parameter -
|
||||
* @param tagsSent UNUSED, IGNORED. Set of tags delivered to the peer and associated with the keyUsed. This is also an output parameter -
|
||||
* the contents of the set is ignored during the call, but afterwards it contains a set of SessionTag
|
||||
* objects that were sent along side the given keyUsed.
|
||||
*/
|
||||
public boolean sendMessage(Destination dest, byte[] payload, SessionKey keyUsed, Set tagsSent) throws I2PSessionException;
|
||||
|
||||
/**
|
||||
* End-to-End Crypto is disabled, tags and keys are ignored.
|
||||
* @param keyUsed UNUSED, IGNORED.
|
||||
* @param tagsSent UNUSED, IGNORED.
|
||||
*/
|
||||
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent) throws I2PSessionException;
|
||||
|
||||
/**
|
||||
* End-to-End Crypto is disabled, tags and keys are ignored.
|
||||
* @param keyUsed UNUSED, IGNORED.
|
||||
* @param tagsSent UNUSED, IGNORED.
|
||||
* @since 0.7.1
|
||||
*/
|
||||
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, long expire) throws I2PSessionException;
|
||||
/** See I2PSessionMuxedImpl for details */
|
||||
|
||||
/**
|
||||
* See I2PSessionMuxedImpl for proto/port details.
|
||||
* End-to-End Crypto is disabled, tags and keys are ignored.
|
||||
* @param keyUsed UNUSED, IGNORED.
|
||||
* @param tagsSent UNUSED, IGNORED.
|
||||
* @since 0.7.1
|
||||
*/
|
||||
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent,
|
||||
int proto, int fromport, int toport) throws I2PSessionException;
|
||||
/** See I2PSessionMuxedImpl for details */
|
||||
|
||||
/**
|
||||
* See I2PSessionMuxedImpl for proto/port details.
|
||||
* End-to-End Crypto is disabled, tags and keys are ignored.
|
||||
* @param keyUsed UNUSED, IGNORED.
|
||||
* @param tagsSent UNUSED, IGNORED.
|
||||
* @since 0.7.1
|
||||
*/
|
||||
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, long expire,
|
||||
int proto, int fromport, int toport) throws I2PSessionException;
|
||||
|
||||
/**
|
||||
* See I2PSessionMuxedImpl for proto/port details.
|
||||
* End-to-End Crypto is disabled, tags and keys are ignored.
|
||||
* @param keyUsed UNUSED, IGNORED.
|
||||
* @param tagsSent UNUSED, IGNORED.
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, long expire,
|
||||
int proto, int fromport, int toport, int flags) throws I2PSessionException;
|
||||
|
||||
/** Receive a message that the router has notified the client about, returning
|
||||
* the payload.
|
||||
* @param msgId message to fetch
|
||||
@ -151,8 +199,16 @@ public interface I2PSession {
|
||||
*/
|
||||
public Destination lookupDest(Hash h, long maxWait) throws I2PSessionException;
|
||||
|
||||
/**
|
||||
* Does not remove properties previously present but missing from this options parameter.
|
||||
* @param options non-null
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public void updateOptions(Properties options);
|
||||
|
||||
/**
|
||||
* Get the current bandwidth limits. Blocking.
|
||||
* @since 0.8.3
|
||||
*/
|
||||
public int[] bandwidthLimits() throws I2PSessionException;
|
||||
|
||||
|
@ -221,20 +221,32 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
}
|
||||
}
|
||||
|
||||
/** save some memory, don't pass along the pointless properties */
|
||||
private Properties filter(Properties options) {
|
||||
Properties rv = new Properties();
|
||||
for (Iterator iter = options.keySet().iterator(); iter.hasNext();) {
|
||||
String key = (String) iter.next();
|
||||
String val = options.getProperty(key);
|
||||
if (key.startsWith("java") ||
|
||||
key.startsWith("user") ||
|
||||
key.startsWith("os") ||
|
||||
key.startsWith("sun") ||
|
||||
key.startsWith("file") ||
|
||||
key.startsWith("line") ||
|
||||
key.startsWith("wrapper")) {
|
||||
if (key.startsWith("java.") ||
|
||||
key.startsWith("user.") ||
|
||||
key.startsWith("os.") ||
|
||||
key.startsWith("sun.") ||
|
||||
key.startsWith("file.") ||
|
||||
key.equals("line.separator") ||
|
||||
key.equals("path.separator") ||
|
||||
key.equals("prng.buffers") ||
|
||||
key.equals("router.trustedUpdateKeys") ||
|
||||
key.startsWith("router.update") ||
|
||||
key.startsWith("routerconsole.") ||
|
||||
key.startsWith("time.") ||
|
||||
key.startsWith("stat.") ||
|
||||
key.startsWith("gnu.") || // gnu JVM
|
||||
key.startsWith("net.i2p.router.web.") || // console nonces
|
||||
key.startsWith("wrapper.")) {
|
||||
if (_log.shouldLog(Log.DEBUG)) _log.debug("Skipping property: " + key);
|
||||
} else if ((key.length() > 255) || (val.length() > 255)) {
|
||||
continue;
|
||||
}
|
||||
String val = options.getProperty(key);
|
||||
if ((key.length() > 255) || (val.length() > 255)) {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn(getPrefix() + "Not passing on property ["
|
||||
+ key
|
||||
@ -247,6 +259,18 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
return rv;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the tunnel and bandwidth settings
|
||||
* @since 0.8.4
|
||||
*/
|
||||
public void updateOptions(Properties options) {
|
||||
_options.putAll(filter(options));
|
||||
_producer.updateBandwidth(this);
|
||||
try {
|
||||
_producer.updateTunnels(this, 0);
|
||||
} catch (I2PSessionException ise) {}
|
||||
}
|
||||
|
||||
void setLeaseSet(LeaseSet ls) {
|
||||
_leaseSet = ls;
|
||||
if (ls != null) {
|
||||
@ -397,7 +421,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
||||
*
|
||||
*/
|
||||
public byte[] receiveMessage(int msgId) throws I2PSessionException {
|
||||
MessagePayloadMessage msg = _availableMessages.remove(new Long(msgId));
|
||||
MessagePayloadMessage msg = _availableMessages.remove(Long.valueOf(msgId));
|
||||
if (msg == null) {
|
||||
_log.error("Receive message " + msgId + " had no matches");
|
||||
return null;
|
||||
@ -414,21 +438,6 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
_producer.reportAbuse(this, msgId, severity);
}

/**
* Send the data to the destination.
* TODO: this currently always returns true, regardless of whether the message was
* delivered successfully. make this wait for at least ACCEPTED
*
*/
public abstract boolean sendMessage(Destination dest, byte[] payload) throws I2PSessionException;

/**
* @param keyUsed unused - no end-to-end crypto
* @param tagsSent unused - no end-to-end crypto
*/
public abstract boolean sendMessage(Destination dest, byte[] payload, SessionKey keyUsed,
Set tagsSent) throws I2PSessionException;

public abstract void receiveStatus(int msgId, long nonce, int status);

/****** no end-to-end crypto
@ -444,7 +453,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
* Recieve a payload message and let the app know its available
*/
public void addNewMessage(MessagePayloadMessage msg) {
Long mid = new Long(msg.getMessageId());
Long mid = Long.valueOf(msg.getMessageId());
_availableMessages.put(mid, msg);
long id = msg.getMessageId();
byte data[] = msg.getPayload().getUnencryptedData();
@ -494,7 +503,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa

public void available(long msgId, int size) {
synchronized (AvailabilityNotifier.this) {
_pendingIds.add(new Long(msgId));
_pendingIds.add(Long.valueOf(msgId));
_pendingSizes.add(Integer.valueOf(size));
AvailabilityNotifier.this.notifyAll();
}

@ -130,6 +130,10 @@ class I2PSessionImpl2 extends I2PSessionImpl {
int proto, int fromport, int toport) throws I2PSessionException {
throw new IllegalArgumentException("Use MuxedImpl");
}
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size, SessionKey keyUsed, Set tagsSent, long expire,
int proto, int fromport, int toport, int flags) throws I2PSessionException {
throw new IllegalArgumentException("Use MuxedImpl");
}

@Override
public boolean sendMessage(Destination dest, byte[] payload) throws I2PSessionException {
@ -222,14 +226,23 @@ class I2PSessionImpl2 extends I2PSessionImpl {
private static final int NUM_TAGS = 50;

/**
* TODO - Don't need to save MessageState since actuallyWait is false...
* But for now just use sendNoEffort() instead.
*
* @param keyUsed unused - no end-to-end crypto
* @param tagsSent unused - no end-to-end crypto
*/
protected boolean sendBestEffort(Destination dest, byte payload[], SessionKey keyUsed, Set tagsSent, long expires)
throws I2PSessionException {
return sendBestEffort(dest, payload, expires, 0);
}

/**
* TODO - Don't need to save MessageState since actuallyWait is false...
* But for now just use sendNoEffort() instead.
*
* @param flags to be passed to the router
* @since 0.8.4
*/
protected boolean sendBestEffort(Destination dest, byte payload[], long expires, int flags)
throws I2PSessionException {
//SessionKey key = null;
//SessionKey newKey = null;
//SessionTag tag = null;
@ -324,7 +337,7 @@ class I2PSessionImpl2 extends I2PSessionImpl {
+ " sync took " + (inSendingSync-beforeSendingSync)
+ " add took " + (afterSendingSync-inSendingSync));
//_producer.sendMessage(this, dest, nonce, payload, tag, key, sentTags, newKey, expires);
_producer.sendMessage(this, dest, nonce, payload, null, null, null, null, expires);
_producer.sendMessage(this, dest, nonce, payload, expires, flags);

// since this is 'best effort', all we're waiting for is a status update
// saying that the router received it - in theory, that should come back

@ -162,12 +162,34 @@ class I2PSessionMuxedImpl extends I2PSessionImpl2 implements I2PSession {
* 255 disallowed
* @param fromPort 1-65535 or 0 for unset
* @param toPort 1-65535 or 0 for unset
* @since 0.7.1
*/
@Override
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size,
SessionKey keyUsed, Set tagsSent, long expires,
int proto, int fromPort, int toPort)
throws I2PSessionException {
return sendMessage(dest, payload, offset, size, keyUsed, tagsSent, 0, proto, fromPort, toPort, 0);
}

/**
* @param keyUsed unused - no end-to-end crypto
* @param tagsSent unused - no end-to-end crypto
* @param proto 1-254 or 0 for unset; recommended:
* I2PSession.PROTO_UNSPECIFIED
* I2PSession.PROTO_STREAMING
* I2PSession.PROTO_DATAGRAM
* 255 disallowed
* @param fromPort 1-65535 or 0 for unset
* @param toPort 1-65535 or 0 for unset
* @param flags to be passed to the router
* @since 0.8.4
*/
@Override
public boolean sendMessage(Destination dest, byte[] payload, int offset, int size,
SessionKey keyUsed, Set tagsSent, long expires,
int proto, int fromPort, int toPort, int flags)
throws I2PSessionException {
if (isClosed()) throw new I2PSessionException("Already closed");
updateActivity();

@ -183,7 +205,7 @@ class I2PSessionMuxedImpl extends I2PSessionImpl2 implements I2PSession {

_context.statManager().addRateData("i2cp.tx.msgCompressed", payload.length, 0);
_context.statManager().addRateData("i2cp.tx.msgExpanded", size, 0);
return sendBestEffort(dest, payload, keyUsed, tagsSent, expires);
return sendBestEffort(dest, payload, expires, flags);
}

/**
@ -191,7 +213,7 @@ class I2PSessionMuxedImpl extends I2PSessionImpl2 implements I2PSession {
*/
@Override
public void addNewMessage(MessagePayloadMessage msg) {
Long mid = new Long(msg.getMessageId());
Long mid = Long.valueOf(msg.getMessageId());
_availableMessages.put(mid, msg);
long id = msg.getMessageId();
byte data[] = msg.getPayload().getUnencryptedData();
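The muxed implementation above is where the new 0.8.4 overload actually does work; the plain I2PSessionImpl2 variants just throw. A minimal caller-side sketch (not part of this diff, and assuming the eleven-argument overload is exposed through I2PSession) of passing flags through to the router:

static boolean sendWithFlags(I2PSession session, Destination dest, byte[] payload)
        throws I2PSessionException {
    // expiration is absolute; keyUsed/tagsSent are unused since there is no end-to-end crypto
    long expires = System.currentTimeMillis() + 60*1000;
    return session.sendMessage(dest, payload, 0, payload.length,
                               null, null, expires,
                               I2PSession.PROTO_DATAGRAM, 5555, 80,   // the port numbers here are hypothetical
                               0);                                    // flags, new in 0.8.4
}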
@ -98,10 +98,17 @@ class I2PSimpleSession extends I2PSessionImpl2 {
}
}

/**
* Ignore, does nothing
* @since 0.8.4
*/
@Override
public void updateOptions(Properties options) {}

/**
* Only map message handlers that we will use
*/
class SimpleMessageHandlerMap extends I2PClientMessageHandlerMap {
private static class SimpleMessageHandlerMap extends I2PClientMessageHandlerMap {
public SimpleMessageHandlerMap(I2PAppContext context) {
int highest = Math.max(DestReplyMessage.MESSAGE_TYPE, BandwidthLimitsMessage.MESSAGE_TYPE);
_handlers = new I2CPMessageHandler[highest+1];

@ -149,7 +149,7 @@ public class CryptixAESEngine extends AESEngine {
@Override
public final void decryptBlock(byte payload[], int inIndex, SessionKey sessionKey, byte rv[], int outIndex) {
if ( (payload == null) || (rv == null) )
throw new IllegalArgumentException("null block args [payload=" + payload + " rv="+rv);
throw new IllegalArgumentException("null block args");
if (payload.length - inIndex > rv.length - outIndex)
throw new IllegalArgumentException("bad block args [payload.len=" + payload.length
+ " inIndex=" + inIndex + " rv.len=" + rv.length

@ -10,6 +10,7 @@ package net.i2p.crypto;
*/

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@ -527,8 +528,6 @@ public class ElGamalAESEngine {
return aesEncr;
}

private final static Set EMPTY_SET = new HashSet();

/**
* For both scenarios, this method encrypts the AES area using the given key, iv
* and making sure the resulting data is at least as long as the paddedSize and
@ -552,7 +551,7 @@ public class ElGamalAESEngine {
long paddedSize, int prefixBytes) {
//_log.debug("iv for encryption: " + DataHelper.toString(iv, 16));
//_log.debug("Encrypting AES");
if (tagsForDelivery == null) tagsForDelivery = EMPTY_SET;
if (tagsForDelivery == null) tagsForDelivery = Collections.EMPTY_SET;
int size = 2 // sizeof(tags)
+ tagsForDelivery.size()
+ SessionTag.BYTE_LENGTH*tagsForDelivery.size()

@ -72,13 +72,13 @@ public class Base32 {
}

private static void runApp(String args[]) {
if ("encodestring".equalsIgnoreCase(args[0])) {
System.out.println(encode(args[1].getBytes()));
return;
}
InputStream in = System.in;
OutputStream out = System.out;
try {
if ("encodestring".equalsIgnoreCase(args[0])) {
System.out.println(encode(args[1].getBytes()));
return;
}
InputStream in = System.in;
OutputStream out = System.out;
if (args.length >= 3) {
out = new FileOutputStream(args[2]);
}
@ -95,6 +95,9 @@ public class Base32 {
}
} catch (IOException ioe) {
ioe.printStackTrace(System.err);
} finally {
try { in.close(); } catch (IOException e) {}
try { out.close(); } catch (IOException e) {}
}
}

@ -178,13 +178,13 @@ public class Base64 {
}

private static void runApp(String args[]) {
if ("encodestring".equalsIgnoreCase(args[0])) {
System.out.println(encode(args[1].getBytes()));
return;
}
InputStream in = System.in;
OutputStream out = System.out;
try {
if ("encodestring".equalsIgnoreCase(args[0])) {
System.out.println(encode(args[1].getBytes()));
return;
}
InputStream in = System.in;
OutputStream out = System.out;
if (args.length >= 3) {
out = new FileOutputStream(args[2]);
}
@ -201,6 +201,9 @@ public class Base64 {
}
} catch (IOException ioe) {
ioe.printStackTrace(System.err);
} finally {
try { in.close(); } catch (IOException e) {}
try { out.close(); } catch (IOException e) {}
}
}

@ -845,7 +845,7 @@ public class DataHelper {
*/
public final static void xor(byte lhs[], int startLeft, byte rhs[], int startRight, byte out[], int startOut, int len) {
if ( (lhs == null) || (rhs == null) || (out == null) )
throw new NullPointerException("Invalid params to xor (" + lhs + ", " + rhs + ", " + out + ")");
throw new NullPointerException("Null params to xor");
if (lhs.length < startLeft + len)
throw new IllegalArgumentException("Left hand side is too short");
if (rhs.length < startRight + len)

core/java/src/net/i2p/data/DateAndFlags.java (new file, 139 lines)
@ -0,0 +1,139 @@
package net.i2p.data;

/*
* free (adj.): unencumbered; not under the control of others
* Released into the public domain
* with no warranty of any kind, either expressed or implied.
*
*/

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Date;

/**
* A six-byte Date and 2 bytes of flags, since a Date won't encroach
* on the top two bytes until the year 10889.
*
* The flag format is not specified here. The bits may be used in
* an application-specific manner. The application should
* be designed so that a flags value of 0 is the default, for
* compatibility with an 8-byte Date.
*
* If we really need some more bits we could use the first few bits
* of the third byte.
*
* @author zzz
* @since 0.8.4
*/
public class DateAndFlags extends DataStructureImpl {
private int _flags;
private long _date;

public DateAndFlags() {}

/**
* @param flags 0 - 65535
*/
public DateAndFlags(int flags, long date) {
_flags = flags;
_date = date;
}

/**
* @param flags 0 - 65535
*/
public DateAndFlags(int flags, Date date) {
_flags = flags;
_date = date.getTime();
}

public int getFlags() {
return _flags;
}

/**
* @param flags 0 - 65535
*/
public void setFlags(int flags) {
_flags = flags;
}

/**
* The Date object is created here, it is not cached.
* Use getTime() if you only need the long value.
*/
public Date getDate() {
return new Date(_date);
}

public long getTime() {
return (_date);
}

public void setDate(long date) {
_date = date;
}

public void setDate(Date date) {
_date = date.getTime();
}

public void readBytes(InputStream in) throws DataFormatException, IOException {
_flags = (int) DataHelper.readLong(in, 2);
_date = DataHelper.readLong(in, 6);
}

public void writeBytes(OutputStream out) throws DataFormatException, IOException {
DataHelper.writeLong(out, 2, _flags);
DataHelper.writeLong(out, 6, _date);
}

/**
* Overridden for efficiency.
*/
@Override
public byte[] toByteArray() {
byte[] rv = DataHelper.toLong(8, _date);
rv[0] = (byte) ((_flags >> 8) & 0xff);
rv[1] = (byte) (_flags & 0xff);
return rv;
}

/**
* Overridden for efficiency.
* @param data non-null
* @throws DataFormatException if null or wrong length
*/
@Override
public void fromByteArray(byte data[]) throws DataFormatException {
if (data == null) throw new DataFormatException("Null data passed in");
if (data.length != 8) throw new DataFormatException("Bad data length");
_flags = (int) DataHelper.fromLong(data, 0, 2);
_date = DataHelper.fromLong(data, 2, 6);
}

@Override
public boolean equals(Object object) {
if ((object == null) || !(object instanceof DateAndFlags)) return false;
DateAndFlags daf = (DateAndFlags) object;
return _date == daf._date && _flags == daf._flags;
}

@Override
public int hashCode() {
return _flags + (int) _date;
}

@Override
public String toString() {
StringBuilder buf = new StringBuilder(64);
buf.append("[DateAndFlags: ");
buf.append("\n\tDate: ").append((new Date(_date)).toString());
buf.append("\n\tFlags: 0x").append(Integer.toHexString(_flags));
buf.append("]");
return buf.toString();
}
}
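A minimal sketch (not part of this diff) of the 8-byte round trip the new class encodes, using only the methods shown above:

DateAndFlags daf = new DateAndFlags(0x0002, System.currentTimeMillis() + 60*1000);
byte[] encoded = daf.toByteArray();      // 2 flag bytes followed by the low 6 bytes of the date
DateAndFlags copy = new DateAndFlags();
copy.fromByteArray(encoded);             // throws DataFormatException unless exactly 8 bytes
boolean same = copy.getFlags() == daf.getFlags() && copy.getTime() == daf.getTime();   // true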
@ -133,9 +133,15 @@ public class PrivateKeyFile {
*/
public Destination createIfAbsent() throws I2PException, IOException, DataFormatException {
if(!this.file.exists()) {
FileOutputStream out = new FileOutputStream(this.file);
this.client.createDestination(out);
out.close();
FileOutputStream out = null;
try {
out = new FileOutputStream(this.file);
this.client.createDestination(out);
} finally {
if (out != null) {
try { out.close(); } catch (IOException ioe) {}
}
}
}
return getDestination();
}
@ -243,29 +249,36 @@ public class PrivateKeyFile {
public I2PSession open() throws I2PSessionException, IOException {
return this.open(new Properties());
}

public I2PSession open(Properties opts) throws I2PSessionException, IOException {
// open input file
FileInputStream in = new FileInputStream(this.file);

// create sesssion
I2PSession s = this.client.createSession(in, opts);

// close file
in.close();

return s;
FileInputStream in = null;
try {
in = new FileInputStream(this.file);
I2PSession s = this.client.createSession(in, opts);
return s;
} finally {
if (in != null) {
try { in.close(); } catch (IOException ioe) {}
}
}
}

/**
* Copied from I2PClientImpl.createDestination()
*/
public void write() throws IOException, DataFormatException {
FileOutputStream out = new FileOutputStream(this.file);
this.dest.writeBytes(out);
this.privKey.writeBytes(out);
this.signingPrivKey.writeBytes(out);
out.flush();
out.close();
FileOutputStream out = null;
try {
out = new FileOutputStream(this.file);
this.dest.writeBytes(out);
this.privKey.writeBytes(out);
this.signingPrivKey.writeBytes(out);
out.flush();
} finally {
if (out != null) {
try { out.close(); } catch (IOException ioe) {}
}
}
}

@Override
@ -377,7 +390,8 @@ public class PrivateKeyFile {
}
}
}
} catch (Exception ioe) {
} catch (DataFormatException dfe) {
} catch (IOException ioe) {
}
// not found, continue to the next file
}

@ -50,6 +50,26 @@ public class TunnelId extends DataStructureImpl {
DataHelper.writeLong(out, 4, _tunnelId);
}

/**
* Overridden for efficiency.
*/
@Override
public byte[] toByteArray() {
return DataHelper.toLong(4, _tunnelId);
}

/**
* Overridden for efficiency.
* @param data non-null
* @throws DataFormatException if null or wrong length
*/
@Override
public void fromByteArray(byte data[]) throws DataFormatException {
if (data == null) throw new DataFormatException("Null data passed in");
if (data.length != 4) throw new DataFormatException("Bad data length");
_tunnelId = (int) DataHelper.fromLong(data, 0, 4);
}

@Override
public boolean equals(Object obj) {
if ( (obj == null) || !(obj instanceof TunnelId))

@ -74,10 +74,11 @@ public class DestReplyMessage extends I2CPMessageImpl {
}

protected byte[] doWriteMessage() throws I2CPMessageException, IOException {
if (_dest == null && _hash == null)
return new byte[0]; // null response allowed
if (_dest == null && _hash != null)
if (_dest == null) {
if (_hash == null)
return new byte[0]; // null response allowed
return _hash.getData();
}
ByteArrayOutputStream os = new ByteArrayOutputStream(_dest.size());
try {
_dest.writeBytes(os);

@ -16,32 +16,66 @@ import java.util.Date;

import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.DateAndFlags;
import net.i2p.data.Destination;
import net.i2p.data.Payload;

/**
* Same as SendMessageMessage, but with an expiration to be passed to the router
*
* As of 0.8.4, retrofitted to use DateAndFlags. Backwards compatible.
*
* @author zzz
*/
public class SendMessageExpiresMessage extends SendMessageMessage {
/* FIXME hides another field FIXME */
public final static int MESSAGE_TYPE = 36;
private SessionId _sessionId;
private Destination _destination;
private Payload _payload;
private Date _expiration;
private final DateAndFlags _daf;

public SendMessageExpiresMessage() {
super();
_daf = new DateAndFlags();
}

/**
* The Date object is created here, it is not cached.
* Use getExpirationTime() if you only need the long value.
*/
public Date getExpiration() {
return _expiration;
return _daf.getDate();
}

/**
* Use this instead of getExpiration().getTime()
* @since 0.8.4
*/
public long getExpirationTime() {
return _daf.getTime();
}

public void setExpiration(Date d) {
_expiration = d;
_daf.setDate(d);
}

/**
* @since 0.8.4
*/
public void setExpiration(long d) {
_daf.setDate(d);
}

/**
* @since 0.8.4
*/
public int getFlags() {
return _daf.getFlags();
}

/**
* @since 0.8.4
*/
public void setFlags(int f) {
_daf.setFlags(f);
}

/**
@ -54,7 +88,7 @@ public class SendMessageExpiresMessage extends SendMessageMessage {
super.readMessage(in, length, type);

try {
_expiration = DataHelper.readDate(in);
_daf.readBytes(in);
} catch (DataFormatException dfe) {
throw new I2CPMessageException("Unable to load the message data", dfe);
}
@ -68,7 +102,7 @@ public class SendMessageExpiresMessage extends SendMessageMessage {
*/
@Override
public void writeMessage(OutputStream out) throws I2CPMessageException, IOException {
if ((getSessionId() == null) || (getDestination() == null) || (getPayload() == null) || (getNonce() <= 0) || (_expiration == null))
if ((getSessionId() == null) || (getDestination() == null) || (getPayload() == null) || (getNonce() <= 0))
throw new I2CPMessageException("Unable to write out the message as there is not enough data");
int len = 2 + getDestination().size() + getPayload().getSize() + 4 + 4 + DataHelper.DATE_LENGTH;

@ -79,7 +113,7 @@ public class SendMessageExpiresMessage extends SendMessageMessage {
getDestination().writeBytes(out);
getPayload().writeBytes(out);
DataHelper.writeLong(out, 4, getNonce());
DataHelper.writeDate(out, _expiration);
_daf.writeBytes(out);
} catch (DataFormatException dfe) {
throw new I2CPMessageException("Error writing the msg", dfe);
}
@ -96,7 +130,7 @@ public class SendMessageExpiresMessage extends SendMessageMessage {
if ((object != null) && (object instanceof SendMessageExpiresMessage)) {
SendMessageExpiresMessage msg = (SendMessageExpiresMessage) object;
return super.equals(object)
&& DataHelper.eq(getExpiration(), msg.getExpiration());
&& _daf.equals(msg._daf);
}

return false;
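Because DateAndFlags keeps the same 8-byte footprint as the old DataHelper date, a flags value of 0 is byte-for-byte identical to what pre-0.8.4 code wrote, which is what makes the retrofit backwards compatible. A client-side sketch (not part of this diff):

SendMessageExpiresMessage msg = new SendMessageExpiresMessage();
msg.setExpiration(System.currentTimeMillis() + 30*1000);   // stored in the low 6 bytes
msg.setFlags(0);                                            // top 2 bytes; 0 keeps the old wire format
long expires = msg.getExpirationTime();                     // preferred over getExpiration().getTime()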
@ -89,7 +89,7 @@ public class FrequencyStat {
/** @since 0.8.2 */
@Override
public boolean equals(Object obj) {
if ((obj == null) || (obj.getClass() != FrequencyStat.class)) return false;
if ((obj == null) || !(obj instanceof FrequencyStat)) return false;
return _statName.equals(((FrequencyStat)obj)._statName);
}

@ -473,7 +473,7 @@ public class Rate {

@Override
public boolean equals(Object obj) {
if ((obj == null) || (obj.getClass() != Rate.class)) return false;
if ((obj == null) || !(obj instanceof Rate)) return false;
if (obj == this) return true;
Rate r = (Rate) obj;
return _period == r.getPeriod() && _creationDate == r.getCreationDate() &&

@ -108,7 +108,7 @@ public class RateStat {

@Override
public boolean equals(Object obj) {
if ((obj == null) || (obj.getClass() != RateStat.class)) return false;
if ((obj == null) || !(obj instanceof RateStat)) return false;
RateStat rs = (RateStat) obj;
if (DataHelper.eq(getGroupName(), rs.getGroupName()) && DataHelper.eq(getDescription(), rs.getDescription())
&& DataHelper.eq(getName(), rs.getName())) {

@ -1,8 +1,8 @@
package net.i2p.util;

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;

import net.i2p.I2PAppContext;
import net.i2p.time.Timestamper;
@ -19,19 +19,19 @@ import net.i2p.time.Timestamper;
*
*/
public class Clock implements Timestamper.UpdateListener {
protected I2PAppContext _context;
private Timestamper _timestamper;
protected long _startedOn;
protected final I2PAppContext _context;
private final Timestamper _timestamper;
protected final long _startedOn;
protected boolean _statCreated;
protected volatile long _offset;
protected boolean _alreadyChanged;
private final Set _listeners;

public Clock(I2PAppContext context) {
_context = context;
_offset = 0;
_alreadyChanged = false;
_listeners = new HashSet(1);
_listeners = new CopyOnWriteArraySet();
_timestamper = new Timestamper(context, this);
_startedOn = System.currentTimeMillis();
_statCreated = false;
}
public static Clock getInstance() {
return I2PAppContext.getGlobalContext().clock();
@ -41,10 +41,6 @@ public class Clock implements Timestamper.UpdateListener {

/** we fetch it on demand to avoid circular dependencies (logging uses the clock) */
protected Log getLog() { return _context.logManager().getLog(Clock.class); }

protected volatile long _offset;
protected boolean _alreadyChanged;
private final Set _listeners;

/** if the clock is skewed by 3+ days, fuck 'em */
public final static long MAX_OFFSET = 3 * 24 * 60 * 60 * 1000;
@ -136,24 +132,18 @@ public class Clock implements Timestamper.UpdateListener {
}

public void addUpdateListener(ClockUpdateListener lsnr) {
synchronized (_listeners) {
_listeners.add(lsnr);
}
}

public void removeUpdateListener(ClockUpdateListener lsnr) {
synchronized (_listeners) {
_listeners.remove(lsnr);
}
}

protected void fireOffsetChanged(long delta) {
synchronized (_listeners) {
for (Iterator iter = _listeners.iterator(); iter.hasNext();) {
ClockUpdateListener lsnr = (ClockUpdateListener) iter.next();
lsnr.offsetChanged(delta);
}
}
}

public static interface ClockUpdateListener {
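The switch to CopyOnWriteArraySet is what lets the synchronized blocks above disappear: its iterator walks an immutable snapshot, so listeners can be added or removed while an offset change is being fired. A condensed equivalent of the new pattern (not part of this diff, written as if inside Clock, using generics and for-each purely for brevity):

private final Set<ClockUpdateListener> listeners = new CopyOnWriteArraySet<ClockUpdateListener>();

protected void fireOffsetChanged(long delta) {
    for (ClockUpdateListener lsnr : listeners)   // snapshot iteration, no lock needed
        lsnr.offsetChanged(delta);
}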
@ -14,7 +14,7 @@ import java.util.concurrent.ConcurrentHashMap;
*/
public class ConcurrentHashSet<E> extends AbstractSet<E> implements Set<E> {
private static final Object DUMMY = new Object();
private Map<E, Object> _map;
private final Map<E, Object> _map;

public ConcurrentHashSet() {
_map = new ConcurrentHashMap();

@ -20,28 +20,34 @@ import org.xlattice.crypto.filters.BloomSHA1;
* Further analysis and tweaking for the tunnel IVV may be required.
*/
public class DecayingBloomFilter {
private I2PAppContext _context;
private Log _log;
protected final I2PAppContext _context;
protected final Log _log;
private BloomSHA1 _current;
private BloomSHA1 _previous;
private int _durationMs;
private int _entryBytes;
protected final int _durationMs;
protected final int _entryBytes;
private byte _extenders[][];
private byte _extended[];
private byte _longToEntry[];
private long _longToEntryMask;
protected long _currentDuplicates;
private boolean _keepDecaying;
private DecayEvent _decayEvent;
protected volatile boolean _keepDecaying;
protected SimpleTimer.TimedEvent _decayEvent;
/** just for logging */
private String _name;
protected final String _name;

private static final int DEFAULT_M = 23;
private static final int DEFAULT_K = 11;
private static final boolean ALWAYS_MISS = false;

/** noop for DHS */
public DecayingBloomFilter() {}
/** only for extension by DHS */
protected DecayingBloomFilter(int durationMs, int entryBytes, String name, I2PAppContext context) {
_context = context;
_log = context.logManager().getLog(getClass());
_entryBytes = entryBytes;
_name = name;
_durationMs = durationMs;
}

/**
* Create a bloom filter that will decay its entries over time.
@ -87,7 +93,6 @@ public class DecayingBloomFilter {
_longToEntry = new byte[_entryBytes];
_longToEntryMask = (1l << (_entryBytes * 8l)) -1;
}
_currentDuplicates = 0;
_decayEvent = new DecayEvent();
_keepDecaying = true;
SimpleTimer.getInstance().addEvent(_decayEvent, _durationMs);
@ -105,11 +110,13 @@ public class DecayingBloomFilter {
}

public long getCurrentDuplicateCount() { return _currentDuplicates; }

public int getInsertedCount() {
synchronized (this) {
return _current.size() + _previous.size();
}
}

public double getFalsePositiveRate() {
synchronized (this) {
return _current.falsePositives();
@ -117,12 +124,15 @@ public class DecayingBloomFilter {
}

/**
* return true if the entry added is a duplicate
*
* @return true if the entry added is a duplicate
*/
public boolean add(byte entry[]) {
return add(entry, 0, entry.length);
}

/**
* @return true if the entry added is a duplicate
*/
public boolean add(byte entry[], int off, int len) {
if (ALWAYS_MISS) return false;
if (entry == null)
@ -131,55 +141,52 @@ public class DecayingBloomFilter {
throw new IllegalArgumentException("Bad entry [" + len + ", expected "
+ _entryBytes + "]");
synchronized (this) {
return locked_add(entry, off, len);
return locked_add(entry, off, len, true);
}
}

/**
* return true if the entry added is a duplicate. the number of low order
* @return true if the entry added is a duplicate. the number of low order
* bits used is determined by the entryBytes parameter used on creation of the
* filter.
*
*/
public boolean add(long entry) {
if (ALWAYS_MISS) return false;
if (_entryBytes <= 7)
entry = ((entry ^ _longToEntryMask) & ((1 << 31)-1)) | (entry ^ _longToEntryMask);
//entry &= _longToEntryMask;
if (entry < 0) {
DataHelper.toLong(_longToEntry, 0, _entryBytes, 0-entry);
_longToEntry[0] |= (1 << 7);
} else {
DataHelper.toLong(_longToEntry, 0, _entryBytes, entry);
}
synchronized (this) {
if (_entryBytes <= 7)
entry = ((entry ^ _longToEntryMask) & ((1 << 31)-1)) | (entry ^ _longToEntryMask);
//entry &= _longToEntryMask;
if (entry < 0) {
DataHelper.toLong(_longToEntry, 0, _entryBytes, 0-entry);
_longToEntry[0] |= (1 << 7);
} else {
DataHelper.toLong(_longToEntry, 0, _entryBytes, entry);
}
return locked_add(_longToEntry, 0, _longToEntry.length);
return locked_add(_longToEntry, 0, _longToEntry.length, true);
}
}

/**
* return true if the entry is already known. this does NOT add the
* @return true if the entry is already known. this does NOT add the
* entry however.
*
*/
public boolean isKnown(long entry) {
if (ALWAYS_MISS) return false;
if (_entryBytes <= 7)
entry = ((entry ^ _longToEntryMask) & ((1 << 31)-1)) | (entry ^ _longToEntryMask);
if (entry < 0) {
DataHelper.toLong(_longToEntry, 0, _entryBytes, 0-entry);
_longToEntry[0] |= (1 << 7);
} else {
DataHelper.toLong(_longToEntry, 0, _entryBytes, entry);
}
synchronized (this) {
if (_entryBytes <= 7)
entry = ((entry ^ _longToEntryMask) & ((1 << 31)-1)) | (entry ^ _longToEntryMask);
if (entry < 0) {
DataHelper.toLong(_longToEntry, 0, _entryBytes, 0-entry);
_longToEntry[0] |= (1 << 7);
} else {
DataHelper.toLong(_longToEntry, 0, _entryBytes, entry);
}
return locked_add(_longToEntry, 0, _longToEntry.length, false);
}
}

private boolean locked_add(byte entry[], int offset, int len) {
return locked_add(entry, offset, len, true);
}
private boolean locked_add(byte entry[], int offset, int len, boolean addIfNew) {
if (_extended != null) {
// extend the entry to 32 bytes
@ -195,7 +202,6 @@ public class DecayingBloomFilter {
} else {
if (addIfNew) {
_current.locked_insert(_extended);
_previous.locked_insert(_extended);
}
return false;
}
@ -208,7 +214,6 @@ public class DecayingBloomFilter {
} else {
if (addIfNew) {
_current.locked_insert(entry, offset, len);
_previous.locked_insert(entry, offset, len);
}
return false;
}
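The add()/isKnown() pair above is the whole external contract of the filter: add() records the entry and reports whether it was seen within roughly the last two decay periods, while isKnown() (now routed through locked_add(..., false)) only checks and never inserts. A usage sketch (not part of this diff; the public (context, durationMs, entryBytes) constructor is assumed, and ctx stands for an I2PAppContext):

DecayingBloomFilter filter = new DecayingBloomFilter(ctx, 2*60*1000, 8);
long msgId = 123456789L;
boolean duplicate = filter.add(msgId);   // false on first sight, true if seen again before decay
boolean known = filter.isKnown(msgId);   // true now, and does not insert anything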
@ -17,12 +17,15 @@ import net.i2p.data.DataHelper;
*
* ./router/java/src/net/i2p/router/tunnel/BuildMessageProcessor.java:
* 32 bytes, peak 10 entries in 1m
* (320 peak entries seen on fast router)
*
* ./router/java/src/net/i2p/router/transport/udp/InboundMessageFragments.java:
* 4 bytes, peak 150 entries in 10s
* (1600 peak entries seen on fast router)
*
* ./router/java/src/net/i2p/router/MessageValidator.java:
* 8 bytes, peak 1K entries in 2m
* (36K peak entries seen on fast router)
*
* ./router/java/src/net/i2p/router/tunnel/BloomFilterIVValidator.java:
* 16 bytes, peak 15K entries in 10m
@ -57,19 +60,10 @@ import net.i2p.data.DataHelper;
* @author zzz
*/
public class DecayingHashSet extends DecayingBloomFilter {
private final I2PAppContext _context;
private final Log _log;
private ConcurrentHashSet<ArrayWrapper> _current;
private ConcurrentHashSet<ArrayWrapper> _previous;
private int _durationMs;
private int _entryBytes;
private volatile boolean _keepDecaying;
private final DecayEvent _decayEvent;
/** just for logging */
private final String _name;
/** synchronize against this lock when switching double buffers */
private final ReentrantReadWriteLock _reorganizeLock = new ReentrantReadWriteLock(true);

/**
* Create a double-buffered hash set that will decay its entries over time.
@ -83,16 +77,11 @@ public class DecayingHashSet extends DecayingBloomFilter {

/** @param name just for logging / debugging / stats */
public DecayingHashSet(I2PAppContext context, int durationMs, int entryBytes, String name) {
super(durationMs, entryBytes, name, context);
if (entryBytes <= 0 || entryBytes > 32)
throw new IllegalArgumentException("Bad size");
_context = context;
_log = context.logManager().getLog(DecayingHashSet.class);
_entryBytes = entryBytes;
_name = name;
_current = new ConcurrentHashSet(128);
_previous = new ConcurrentHashSet(128);
_durationMs = durationMs;
_currentDuplicates = 0;
_decayEvent = new DecayEvent();
_keepDecaying = true;
SimpleScheduler.getInstance().addEvent(_decayEvent, _durationMs);
@ -111,6 +100,7 @@ public class DecayingHashSet extends DecayingBloomFilter {
public int getInsertedCount() {
return _current.size() + _previous.size();
}

/** pointless, only used for logging elsewhere */
@Override
public double getFalsePositiveRate() {
@ -121,7 +111,6 @@ public class DecayingHashSet extends DecayingBloomFilter {

/**
* @return true if the entry added is a duplicate
*
*/
@Override
public boolean add(byte entry[], int off, int len) {
@ -130,9 +119,10 @@ public class DecayingHashSet extends DecayingBloomFilter {
if (len != _entryBytes)
throw new IllegalArgumentException("Bad entry [" + len + ", expected "
+ _entryBytes + "]");
ArrayWrapper w = new ArrayWrapper(entry, off, len);
getReadLock();
try {
return locked_add(entry, off, len, true);
return locked_add(w, true);
} finally { releaseReadLock(); }
}

@ -158,35 +148,30 @@ public class DecayingHashSet extends DecayingBloomFilter {
}

private boolean add(long entry, boolean addIfNew) {
int len = Math.min(8, _entryBytes);
byte[] b = toLong(len, entry);
ArrayWrapper w = new ArrayWrapper(entry);
getReadLock();
try {
return locked_add(b, 0, len, addIfNew);
return locked_add(w, addIfNew);
} finally { releaseReadLock(); }
}

/** from DataHelper, except negative values ok */
private static byte[] toLong(int numBytes, long value) {
byte target[] = new byte[numBytes];
for (int i = 0; i < numBytes; i++)
target[numBytes-i-1] = (byte)(value >>> (i*8));
return target;
}

/** so many questions... */
private boolean locked_add(byte entry[], int offset, int len, boolean addIfNew) {
ArrayWrapper w = new ArrayWrapper(entry, offset, len);
boolean seen = _current.contains(w);
seen = seen || _previous.contains(w);
/**
* @param addIfNew if true, add the element to current if it is not already there;
* if false, only check
* @return if the element is in either the current or previous set
*/
private boolean locked_add(ArrayWrapper w, boolean addIfNew) {
boolean seen;
// only access _current once. This adds to _current even if seen in _previous.
if (addIfNew)
seen = !_current.add(w);
else
seen = _current.contains(w);
if (!seen)
seen = _previous.contains(w);
if (seen) {
// why increment if addIfNew == false?
// why not add to current if only in previous?
// why increment if addIfNew == false? Only used for stats...
_currentDuplicates++;
} else if (addIfNew) {
_current.add(w);
// why add to previous?
_previous.add(w);
}
return seen;
}
@ -270,14 +255,22 @@ public class DecayingHashSet extends DecayingBloomFilter {
* the maximum entropy given the length of the data.
*/
private static class ArrayWrapper {
private long _longhashcode;
private final long _longhashcode;

public ArrayWrapper(byte[] b, int offset, int len) {
int idx = offset;
int shift = Math.min(8, 64 / len);
long lhc = 0;
for (int i = 0; i < len; i++) {
// xor better than + in tests
_longhashcode ^= (((long) b[idx++]) << (i * shift));
lhc ^= (((long) b[idx++]) << (i * shift));
}
_longhashcode = lhc;
}

/** faster version for when storing <= 8 bytes */
public ArrayWrapper(long b) {
_longhashcode = b;
}

public int hashCode() {
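DecayingHashSet trades the Bloom filter's fixed memory for exact matching, which is why the class comment above recommends it for the small, short-lived sets (message IDs, fragment keys, IV prefixes) listed there. A sketch (not part of this diff) using the constructor shown above; ctx and messageId are assumed to be an I2PAppContext and a long:

DecayingHashSet validator = new DecayingHashSet(ctx, 2*60*1000, 8, "msgValidator");
boolean dup = validator.add(messageId);   // exact match, no false positives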
@ -430,29 +430,33 @@ public class EepGet {
_log.debug("Fetching (proxied? " + _shouldProxy + ") url=" + _actualURL);
while (_keepFetching) {
SocketTimeout timeout = null;
if (_fetchHeaderTimeout > 0)
if (_fetchHeaderTimeout > 0) {
timeout = new SocketTimeout(_fetchHeaderTimeout);
final SocketTimeout stimeout = timeout; // ugly - why not use sotimeout?
timeout.setTimeoutCommand(new Runnable() {
public void run() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("timeout reached on " + _url + ": " + stimeout);
_aborted = true;
}
});
timeout.setTotalTimeoutPeriod(_fetchEndTime);
final SocketTimeout stimeout = timeout; // ugly - why not use sotimeout?
timeout.setTimeoutCommand(new Runnable() {
public void run() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("timeout reached on " + _url + ": " + stimeout);
_aborted = true;
}
});
timeout.setTotalTimeoutPeriod(_fetchEndTime);
}
try {
for (int i = 0; i < _listeners.size(); i++)
_listeners.get(i).attempting(_url);
sendRequest(timeout);
timeout.resetTimer();
if (timeout != null)
timeout.resetTimer();
doFetch(timeout);
timeout.cancel();
if (timeout != null)
timeout.cancel();
if (!_transferFailed)
return true;
break;
} catch (IOException ioe) {
timeout.cancel();
if (timeout != null)
timeout.cancel();
for (int i = 0; i < _listeners.size(); i++)
_listeners.get(i).attemptFailed(_url, _bytesTransferred, _bytesRemaining, _currentAttempt, _numRetries, ioe);
if (_log.shouldLog(Log.WARN))
@ -492,7 +496,10 @@ public class EepGet {
return false;
}

/** single fetch */
/**
* single fetch
* @param timeout may be null
*/
protected void doFetch(SocketTimeout timeout) throws IOException {
_headersRead = false;
_aborted = false;
@ -504,11 +511,13 @@ public class EepGet {
if (_aborted)
throw new IOException("Timed out reading the HTTP headers");

timeout.resetTimer();
if (_fetchInactivityTimeout > 0)
timeout.setInactivityTimeout(_fetchInactivityTimeout);
else
timeout.setInactivityTimeout(INACTIVITY_TIMEOUT);
if (timeout != null) {
timeout.resetTimer();
if (_fetchInactivityTimeout > 0)
timeout.setInactivityTimeout(_fetchInactivityTimeout);
else
timeout.setInactivityTimeout(INACTIVITY_TIMEOUT);
}

if (_redirectLocation != null) {
//try {
@ -571,7 +580,8 @@ public class EepGet {
int read = _proxyIn.read(buf, 0, toRead);
if (read == -1)
break;
timeout.resetTimer();
if (timeout != null)
timeout.resetTimer();
_out.write(buf, 0, read);
_bytesTransferred += read;
if ((_maxSize > -1) && (_alreadyTransferred + read > _maxSize)) // could transfer a little over maxSize
@ -597,7 +607,8 @@ public class EepGet {
read++;
}
}
timeout.resetTimer();
if (timeout != null)
timeout.resetTimer();
if (_bytesRemaining >= read) // else chunked?
_bytesRemaining -= read;
if (read > 0) {
@ -622,7 +633,8 @@ public class EepGet {
if (_aborted)
throw new IOException("Timed out reading the HTTP data");

timeout.cancel();
if (timeout != null)
timeout.cancel();

if (_log.shouldLog(Log.DEBUG))
_log.debug("Done transferring " + _bytesTransferred + " (ok? " + !_transferFailed + ")");
@ -867,6 +879,9 @@ public class EepGet {
private static final byte NL = '\n';
private static boolean isNL(byte b) { return (b == NL); }

/**
* @param timeout may be null
*/
protected void sendRequest(SocketTimeout timeout) throws IOException {
if (_outputStream != null) {
// We are reading into a stream supplied by a caller,
@ -907,7 +922,8 @@ public class EepGet {
_proxyIn = _proxy.getInputStream();
_proxyOut = _proxy.getOutputStream();

timeout.setSocket(_proxy);
if (timeout != null)
timeout.setSocket(_proxy);

_proxyOut.write(DataHelper.getUTF8(req));
_proxyOut.flush();
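Every timeout call in fetch(), doFetch() and sendRequest() now has to tolerate a null SocketTimeout, since none is created when _fetchHeaderTimeout is zero or negative. A hypothetical helper (not part of this diff, and the method name is invented) condensing the guard the patch repeats inline:

private static void resetIfSet(SocketTimeout timeout) {
    if (timeout != null)
        timeout.resetTimer();   // same null-guard applied before every resetTimer()/cancel()/setSocket() above
}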
@ -31,6 +31,7 @@ public class EepPost {
_log = ctx.logManager().getLog(EepPost.class);
}

/*****
public static void main(String args[]) {
EepPost e = new EepPost();
Map fields = new HashMap();
@ -47,6 +48,8 @@ public class EepPost {
//e.postFiles("http://localhost/cgi-bin/read.pl", null, -1, fields, null);
//e.postFiles("http://localhost:2001/import.jsp", null, -1, fields, null);
}
*****/

/**
* Submit an HTTP POST to the given URL (using the proxy if specified),
* uploading the given fields. If the field's value is a File object, then
@ -117,7 +120,7 @@ public class EepPost {
}
}
out.close();
} catch (Exception e) {
} catch (IOException e) {
e.printStackTrace();
} finally {
if (s != null) try { s.close(); } catch (IOException ioe) {}

@ -122,24 +122,24 @@ public class FileUtil {
}
}
} else {
InputStream in = null;
FileOutputStream fos = null;
JarOutputStream jos = null;
try {
InputStream in = zip.getInputStream(entry);
in = zip.getInputStream(entry);
if (entry.getName().endsWith(".jar.pack") || entry.getName().endsWith(".war.pack")) {
target = new File(targetDir, entry.getName().substring(0, entry.getName().length() - ".pack".length()));
JarOutputStream fos = new JarOutputStream(new FileOutputStream(target));
unpack(in, fos);
fos.close();
jos = new JarOutputStream(new FileOutputStream(target));
unpack(in, jos);
System.err.println("INFO: File [" + entry.getName() + "] extracted and unpacked");
} else {
FileOutputStream fos = new FileOutputStream(target);
fos = new FileOutputStream(target);
int read = 0;
while ( (read = in.read(buf)) != -1) {
fos.write(buf, 0, read);
}
fos.close();
System.err.println("INFO: File [" + entry.getName() + "] extracted");
}
in.close();
} catch (IOException ioe) {
System.err.println("ERROR: Error extracting the zip entry (" + entry.getName() + ')');
if (ioe.getMessage() != null && ioe.getMessage().indexOf("CAFED00D") >= 0)
@ -151,6 +151,10 @@ public class FileUtil {
System.err.println("ERROR: Error unpacking the zip entry (" + entry.getName() +
"), your JVM does not support unpack200");
return false;
} finally {
try { if (in != null) in.close(); } catch (IOException ioe) {}
try { if (fos != null) fos.close(); } catch (IOException ioe) {}
try { if (jos != null) jos.close(); } catch (IOException ioe) {}
}
}
}
@ -401,21 +405,24 @@ public class FileUtil {
if (dst.exists() && !overwriteExisting) return false;

byte buf[] = new byte[4096];
InputStream in = null;
OutputStream out = null;
try {
FileInputStream in = new FileInputStream(src);
FileOutputStream out = new FileOutputStream(dst);
in = new FileInputStream(src);
out = new FileOutputStream(dst);

int read = 0;
while ( (read = in.read(buf)) != -1)
out.write(buf, 0, read);

in.close();
out.close();
return true;
} catch (IOException ioe) {
if (!quiet)
ioe.printStackTrace();
return false;
} finally {
try { if (in != null) in.close(); } catch (IOException ioe) {}
try { if (out != null) out.close(); } catch (IOException ioe) {}
}
}
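Both the FileUtil changes above and the PrivateKeyFile changes earlier apply the same idiom: declare the stream outside the try, open it inside, and close it in a finally block that swallows close() failures so they cannot mask the original exception or leak the descriptor. A standalone sketch (not part of this diff; the file name and the data array are placeholders):

OutputStream out = null;
try {
    out = new FileOutputStream("placeholder.dat");
    out.write(data);                              // "data" is assumed to be a byte[]
} finally {
    if (out != null) {
        try { out.close(); } catch (IOException ioe) {}
    }
}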
@ -10,9 +10,9 @@ package net.i2p.util;
*/

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;

/**
* Like I2PThread but with per-thread OOM listeners,
@ -22,7 +22,7 @@ import java.util.Set;
*/
public class I2PAppThread extends I2PThread {

private Set _threadListeners = new HashSet(0);
private final Set _threadListeners = new CopyOnWriteArraySet();

public I2PAppThread() {
super();

@ -10,9 +10,9 @@ package net.i2p.util;
*/

import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;

/**
* In case its useful later...
@ -21,7 +21,7 @@ import java.util.Set;
*/
public class I2PThread extends Thread {
private static volatile Log _log;
private static Set _listeners = new HashSet(4);
private static final Set _listeners = new CopyOnWriteArraySet();
private String _name;
private Exception _createdBy;

@ -205,7 +205,8 @@ public class Log {
}
@Override
public boolean equals(Object obj) {
if (obj == null) throw new NullPointerException("Null object scope?");
if (obj == null)
return false;
if (obj instanceof LogScope) {
LogScope s = (LogScope)obj;
return s._scopeCache.equals(_scopeCache);

@ -166,8 +166,10 @@ public class LogManager {
Log rv = _logs.get(scope);
if (rv == null) {
rv = new Log(this, cls, name);
_logs.putIfAbsent(scope, rv);
isNew = true;
Log old = _logs.putIfAbsent(scope, rv);
isNew = old == null;
if (!isNew)
rv = old;
}
if (isNew)
updateLimit(rv);
@ -180,8 +182,9 @@ public class LogManager {
}

void addLog(Log log) {
_logs.putIfAbsent(log.getScope(), log);
updateLimit(log);
Log old = _logs.putIfAbsent(log.getScope(), log);
if (old == null)
updateLimit(log);
}

public LogConsoleBuffer getBuffer() { return _consoleBuffer; }
@ -636,6 +639,7 @@ public class LogManager {
return _dateFormatPattern;
}

/*****
public static void main(String args[]) {
I2PAppContext ctx = new I2PAppContext();
Log l1 = ctx.logManager().getLog("test.1");
@ -656,6 +660,7 @@ public class LogManager {
}
System.exit(0);
}
*****/

public void shutdown() {
if (_writer != null) {
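The LogManager fix hinges on reading putIfAbsent()'s return value: if another thread already registered a Log for the scope, the map keeps that instance and the caller must switch to it, otherwise two threads could hand out different Log objects for the same scope. A condensed sketch (not part of this diff, written as if inside LogManager with its existing fields):

Log candidate = new Log(this, cls, name);
Log existing = _logs.putIfAbsent(scope, candidate);   // returns null only if this thread won the race
Log rv = (existing != null) ? existing : candidate;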
@ -92,10 +92,13 @@ class LogRecordFormatter {
|
||||
}
|
||||
|
||||
/** don't translate */
|
||||
/****
|
||||
private static String getPriority(LogRecord rec) {
|
||||
return toString(Log.toLevelString(rec.getPriority()), MAX_PRIORITY_LENGTH);
|
||||
}
|
||||
****/
|
||||
|
||||
/** */
|
||||
private static final String BUNDLE_NAME = "net.i2p.router.web.messages";
|
||||
|
||||
/** translate @since 0.7.14 */
|
||||
|
@ -78,6 +78,7 @@ public class LookaheadInputStream extends FilterInputStream {
|
||||
/** grab the lookahead footer */
|
||||
public byte[] getFooter() { return _footerLookahead; }
|
||||
|
||||
/*******
|
||||
public static void main(String args[]) {
|
||||
byte buf[] = new byte[32];
|
||||
for (int i = 0; i < 32; i++)
|
||||
@ -128,4 +129,5 @@ public class LookaheadInputStream extends FilterInputStream {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
******/
|
||||
}
|
||||
|
@ -482,12 +482,14 @@ public class SSLEepGet extends EepGet {
|
||||
if (_aborted)
|
||||
throw new IOException("Timed out reading the HTTP headers");
|
||||
|
||||
timeout.resetTimer();
|
||||
if (_fetchInactivityTimeout > 0)
|
||||
timeout.setInactivityTimeout(_fetchInactivityTimeout);
|
||||
else
|
||||
timeout.setInactivityTimeout(60*1000);
|
||||
|
||||
if (timeout != null) {
|
||||
timeout.resetTimer();
|
||||
if (_fetchInactivityTimeout > 0)
|
||||
timeout.setInactivityTimeout(_fetchInactivityTimeout);
|
||||
else
|
||||
timeout.setInactivityTimeout(60*1000);
|
||||
}
|
||||
|
||||
if (_redirectLocation != null) {
|
||||
throw new IOException("Server redirect to " + _redirectLocation + " not allowed");
|
||||
}
|
||||
@ -506,7 +508,8 @@ public class SSLEepGet extends EepGet {
|
||||
int read = _proxyIn.read(buf, 0, toRead);
|
||||
if (read == -1)
|
||||
break;
|
||||
timeout.resetTimer();
|
||||
if (timeout != null)
|
||||
timeout.resetTimer();
|
||||
_out.write(buf, 0, read);
|
||||
_bytesTransferred += read;
|
||||
|
||||
@ -531,7 +534,8 @@ public class SSLEepGet extends EepGet {
|
||||
read++;
|
||||
}
|
||||
}
|
||||
timeout.resetTimer();
|
||||
if (timeout != null)
|
||||
timeout.resetTimer();
|
||||
if (_bytesRemaining >= read) // else chunked?
|
||||
_bytesRemaining -= read;
|
||||
if (read > 0) {
|
||||
@ -556,7 +560,8 @@ public class SSLEepGet extends EepGet {
|
||||
if (_aborted)
|
||||
throw new IOException("Timed out reading the HTTP data");
|
||||
|
||||
timeout.cancel();
|
||||
if (timeout != null)
|
||||
timeout.cancel();
|
||||
|
||||
if (_transferFailed) {
|
||||
// 404, etc - transferFailed is called after all attempts fail, by fetch() above
|
||||
|
@ -89,7 +89,7 @@ public class ShellCommand {
|
||||
*
|
||||
* @author hypercubus
|
||||
*/
|
||||
private class StreamConsumer extends Thread {
|
||||
private static class StreamConsumer extends Thread {
|
||||
|
||||
private BufferedReader bufferedReader;
|
||||
private InputStreamReader inputStreamReader;
|
||||
@ -123,7 +123,7 @@ public class ShellCommand {
|
||||
*
|
||||
* @author hypercubus
|
||||
*/
|
||||
private class StreamReader extends Thread {
|
||||
private static class StreamReader extends Thread {
|
||||
|
||||
private BufferedReader bufferedReader;
|
||||
private InputStreamReader inputStreamReader;
|
||||
@ -159,7 +159,7 @@ public class ShellCommand {
|
||||
*
|
||||
* @author hypercubus
|
||||
*/
|
||||
private class StreamWriter extends Thread {
|
||||
private static class StreamWriter extends Thread {
|
||||
|
||||
private BufferedWriter bufferedWriter;
|
||||
private BufferedReader in;
|
||||
@ -183,7 +183,7 @@ public class ShellCommand {
|
||||
bufferedWriter.write(input, 0, input.length());
|
||||
bufferedWriter.flush();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
} catch (IOException e) {
|
||||
try {
|
||||
bufferedWriter.flush();
|
||||
} catch (IOException e1) {
|
||||
|
@ -90,7 +90,7 @@ public class SimpleTimer {
|
||||
int totalEvents = 0;
|
||||
long now = System.currentTimeMillis();
|
||||
long eventTime = now + timeoutMs;
|
||||
Long time = new Long(eventTime);
|
||||
Long time = Long.valueOf(eventTime);
|
||||
synchronized (_events) {
|
||||
// remove the old scheduled position, then reinsert it
|
||||
Long oldTime = (Long)_eventTimes.get(event);
|
||||
|
@ -55,7 +55,7 @@ public class SimpleTimer2 {
|
||||
_executor.shutdownNow();
|
||||
}
|
||||
|
||||
private class CustomScheduledThreadPoolExecutor extends ScheduledThreadPoolExecutor {
|
||||
private static class CustomScheduledThreadPoolExecutor extends ScheduledThreadPoolExecutor {
|
||||
public CustomScheduledThreadPoolExecutor(int threads, ThreadFactory factory) {
|
||||
super(threads, factory);
|
||||
}
|
||||
|
@ -12,7 +12,9 @@
|
||||
<!-- been patched to allow IPv6 addresses as well, -->
|
||||
<!-- enclosed in brackets e.g. [::1] -->
|
||||
<!-- * port: Default 7658 in the addListener section -->
|
||||
<!-- * threads: Raise MaxThreads in the addListener section -->
|
||||
<!-- * docroot: Change the ResourceBase in the addContext section -->
|
||||
<!-- to serve files from a different location. -->
|
||||
<!-- * threads: Raise MinThreads and/or MaxThreads in the addListener section -->
|
||||
<!-- if you have a high-traffic site and get a lot of warnings. -->
|
||||
<!-- * Uncomment the addWebApplications section to use to enable -->
|
||||
<!-- war files placed in the webapps/ dir. -->
|
||||
@ -23,7 +25,7 @@
|
||||
<!-- found in Jetty 5, you may install and run Jetty 6 in a different JVM, -->
|
||||
<!-- or run any other web server such as Apache. If you do run another -->
|
||||
<!-- web server instead, be sure and disable the Jetty 5 server for your -->
|
||||
<!-- eepsite on http://127.0.0.1/configclients.jsp . -->
|
||||
<!-- eepsite on http://127.0.0.1:7657/configclients.jsp . -->
|
||||
<!-- -->
|
||||
<!-- Jetty errors and warnings will appear in wrapper.log, check there -->
|
||||
<!-- to diagnose problems. -->
|
||||
@ -57,13 +59,12 @@
|
||||
<Set name="port">7658</Set>
|
||||
</New>
|
||||
</Arg>
|
||||
<Set name="MinThreads">3</Set>
|
||||
<Set name="MaxThreads">10</Set>
|
||||
<Set name="MinThreads">1</Set>
|
||||
<Set name="MaxThreads">16</Set>
|
||||
<Set name="MaxIdleTimeMs">60000</Set>
|
||||
<Set name="LowResourcePersistTimeMs">1000</Set>
|
||||
<Set name="ConfidentialPort">8443</Set>
|
||||
<Set name="IntegralPort">8443</Set>
|
||||
<Set name="PoolName">main</Set>
|
||||
</New>
|
||||
</Arg>
|
||||
</Call>
|
||||
|
@ -12,6 +12,7 @@ import java.io.ByteArrayInputStream;
import java.io.IOException;

import net.i2p.I2PAppContext;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
@ -28,59 +29,39 @@ import net.i2p.data.TunnelId;
public class DatabaseStoreMessage extends I2NPMessageImpl {
public final static int MESSAGE_TYPE = 1;
private Hash _key;
private int _type;
private LeaseSet _leaseSet;
private RouterInfo _info;
private byte[] _leaseSetCache;
private byte[] _routerInfoCache;
private DatabaseEntry _dbEntry;
private byte[] _byteCache;
private long _replyToken;
private TunnelId _replyTunnel;
private Hash _replyGateway;

public final static int KEY_TYPE_ROUTERINFO = 0;
public final static int KEY_TYPE_LEASESET = 1;

public DatabaseStoreMessage(I2PAppContext context) {
super(context);
setValueType(-1);
}

/**
* Defines the key in the network database being stored
*
*/
public Hash getKey() { return _key; }
public void setKey(Hash key) { _key = key; }

/**
* Defines the router info value in the network database being stored
*
*/
public RouterInfo getRouterInfo() { return _info; }
public void setRouterInfo(RouterInfo routerInfo) {
_info = routerInfo;
if (_info != null)
setValueType(KEY_TYPE_ROUTERINFO);
public Hash getKey() {
if (_key != null)
return _key; // receive
if (_dbEntry != null)
return _dbEntry.getHash(); // create
return null;
}

/**
* Defines the lease set value in the network database being stored
*
* Defines the entry in the network database being stored
*/
public LeaseSet getLeaseSet() { return _leaseSet; }
public void setLeaseSet(LeaseSet leaseSet) {
_leaseSet = leaseSet;
if (_leaseSet != null)
setValueType(KEY_TYPE_LEASESET);
}

public DatabaseEntry getEntry() { return _dbEntry; }

/**
* Defines type of key being stored in the network database -
* either KEY_TYPE_ROUTERINFO or KEY_TYPE_LEASESET
*
* This also sets the key
*/
public int getValueType() { return _type; }
public void setValueType(int type) { _type = type; }
public void setEntry(DatabaseEntry entry) {
_dbEntry = entry;
}

/**
* If a reply is desired, this token specifies the message ID that should
@ -90,6 +71,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
* @return positive reply token ID, or 0 if no reply is necessary.
*/
public long getReplyToken() { return _replyToken; }

/**
* Update the reply token.
*
@ -113,13 +95,10 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
if (type != MESSAGE_TYPE) throw new I2NPMessageException("Message type is incorrect for this message");
int curIndex = offset;

//byte keyData[] = new byte[Hash.HASH_LENGTH];
//System.arraycopy(data, curIndex, keyData, 0, Hash.HASH_LENGTH);
_key = Hash.create(data, curIndex);
curIndex += Hash.HASH_LENGTH;
//_key = new Hash(keyData);

_type = (int)DataHelper.fromLong(data, curIndex, 1);
type = (int)DataHelper.fromLong(data, curIndex, 1);
curIndex++;

_replyToken = DataHelper.fromLong(data, curIndex, 4);
@ -131,39 +110,38 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
_replyTunnel = new TunnelId(tunnel);
curIndex += 4;

//byte gw[] = new byte[Hash.HASH_LENGTH];
//System.arraycopy(data, curIndex, gw, 0, Hash.HASH_LENGTH);
_replyGateway = Hash.create(data, curIndex);
curIndex += Hash.HASH_LENGTH;
//_replyGateway = new Hash(gw);
} else {
_replyTunnel = null;
_replyGateway = null;
}

if (_type == KEY_TYPE_LEASESET) {
_leaseSet = new LeaseSet();
if (type == DatabaseEntry.KEY_TYPE_LEASESET) {
_dbEntry = new LeaseSet();
try {
_leaseSet.readBytes(new ByteArrayInputStream(data, curIndex, data.length-curIndex));
_dbEntry.readBytes(new ByteArrayInputStream(data, curIndex, data.length-curIndex));
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the leaseSet", dfe);
}
} else if (_type == KEY_TYPE_ROUTERINFO) {
_info = new RouterInfo();
} else if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
_dbEntry = new RouterInfo();
int compressedSize = (int)DataHelper.fromLong(data, curIndex, 2);
curIndex += 2;

try {
byte decompressed[] = DataHelper.decompress(data, curIndex, compressedSize);
_info.readBytes(new ByteArrayInputStream(decompressed));
_dbEntry.readBytes(new ByteArrayInputStream(decompressed));
} catch (DataFormatException dfe) {
throw new I2NPMessageException("Error reading the routerInfo", dfe);
} catch (IOException ioe) {
throw new I2NPMessageException("Compressed routerInfo was corrupt", ioe);
}
} else {
throw new I2NPMessageException("Invalid type of key read from the structure - " + _type);
throw new I2NPMessageException("Invalid type of key read from the structure - " + type);
}
//if (!key.equals(_dbEntry.getHash()))
// throw new I2NPMessageException("Hash mismatch in DSM");
}

@ -172,28 +150,28 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
int len = Hash.HASH_LENGTH + 1 + 4; // key+type+replyToken
if (_replyToken > 0)
len += 4 + Hash.HASH_LENGTH; // replyTunnel+replyGateway
if (_type == KEY_TYPE_LEASESET) {
_leaseSetCache = _leaseSet.toByteArray();
len += _leaseSetCache.length;
} else if (_type == KEY_TYPE_ROUTERINFO) {
byte uncompressed[] = _info.toByteArray();
byte compressed[] = DataHelper.compress(uncompressed);
_routerInfoCache = compressed;
len += compressed.length + 2;
if (_dbEntry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
_byteCache = _dbEntry.toByteArray();
} else if (_dbEntry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
byte uncompressed[] = _dbEntry.toByteArray();
_byteCache = DataHelper.compress(uncompressed);
len += 2;
}
len += _byteCache.length;
return len;
}

/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if (_key == null) throw new I2NPMessageException("Invalid key");
if ( (_type != KEY_TYPE_LEASESET) && (_type != KEY_TYPE_ROUTERINFO) ) throw new I2NPMessageException("Invalid key type");
if ( (_type == KEY_TYPE_LEASESET) && (_leaseSet == null) ) throw new I2NPMessageException("Missing lease set");
if ( (_type == KEY_TYPE_ROUTERINFO) && (_info == null) ) throw new I2NPMessageException("Missing router info");
if (_dbEntry == null) throw new I2NPMessageException("Missing entry");
int type = _dbEntry.getType();
if (type != DatabaseEntry.KEY_TYPE_LEASESET && type != DatabaseEntry.KEY_TYPE_ROUTERINFO)
throw new I2NPMessageException("Invalid key type");

System.arraycopy(_key.getData(), 0, out, curIndex, Hash.HASH_LENGTH);
// Use the hash of the DatabaseEntry
System.arraycopy(getKey().getData(), 0, out, curIndex, Hash.HASH_LENGTH);
curIndex += Hash.HASH_LENGTH;
byte type[] = DataHelper.toLong(1, _type);
out[curIndex++] = type[0];
out[curIndex++] = (byte) type;
byte tok[] = DataHelper.toLong(4, _replyToken);
System.arraycopy(tok, 0, out, curIndex, 4);
curIndex += 4;
@ -209,17 +187,14 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
curIndex += Hash.HASH_LENGTH;
}

if (_type == KEY_TYPE_LEASESET) {
// initialized in calculateWrittenLength
System.arraycopy(_leaseSetCache, 0, out, curIndex, _leaseSetCache.length);
curIndex += _leaseSetCache.length;
} else if (_type == KEY_TYPE_ROUTERINFO) {
byte len[] = DataHelper.toLong(2, _routerInfoCache.length);
// _byteCache initialized in calculateWrittenLength
if (type == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
byte len[] = DataHelper.toLong(2, _byteCache.length);
out[curIndex++] = len[0];
out[curIndex++] = len[1];
System.arraycopy(_routerInfoCache, 0, out, curIndex, _routerInfoCache.length);
curIndex += _routerInfoCache.length;
}
System.arraycopy(_byteCache, 0, out, curIndex, _byteCache.length);
curIndex += _byteCache.length;
return curIndex;
}

@ -228,9 +203,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
@Override
public int hashCode() {
return DataHelper.hashCode(getKey()) +
DataHelper.hashCode(getLeaseSet()) +
DataHelper.hashCode(getRouterInfo()) +
getValueType() +
DataHelper.hashCode(_dbEntry) +
(int)getReplyToken() +
DataHelper.hashCode(getReplyTunnel()) +
DataHelper.hashCode(getReplyGateway());
@ -241,9 +214,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
if ( (object != null) && (object instanceof DatabaseStoreMessage) ) {
DatabaseStoreMessage msg = (DatabaseStoreMessage)object;
return DataHelper.eq(getKey(),msg.getKey()) &&
DataHelper.eq(getLeaseSet(),msg.getLeaseSet()) &&
DataHelper.eq(getRouterInfo(),msg.getRouterInfo()) &&
_type == msg.getValueType() &&
DataHelper.eq(_dbEntry,msg.getEntry()) &&
getReplyToken() == msg.getReplyToken() &&
DataHelper.eq(getReplyTunnel(), msg.getReplyTunnel()) &&
DataHelper.eq(getReplyGateway(), msg.getReplyGateway());
@ -259,9 +230,7 @@ public class DatabaseStoreMessage extends I2NPMessageImpl {
buf.append("\n\tExpiration: ").append(getMessageExpiration());
buf.append("\n\tUnique ID: ").append(getUniqueId());
buf.append("\n\tKey: ").append(getKey());
buf.append("\n\tValue Type: ").append(getValueType());
buf.append("\n\tRouter Info: ").append(getRouterInfo());
buf.append("\n\tLease Set: ").append(getLeaseSet());
buf.append("\n\tEntry: ").append(_dbEntry);
buf.append("\n\tReply token: ").append(getReplyToken());
buf.append("\n\tReply tunnel: ").append(getReplyTunnel());
buf.append("\n\tReply gateway: ").append(getReplyGateway());

@ -122,7 +122,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
if (!eq)
throw new I2NPMessageException("Hash does not match for " + getClass().getName());

long start = _context.clock().now();
//long start = _context.clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
readMessage(buffer, 0, size, type);
@ -184,7 +184,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
if (!eq)
throw new I2NPMessageException("Hash does not match for " + getClass().getName());

long start = _context.clock().now();
//long start = _context.clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Reading bytes: type = " + type + " / uniqueId : " + _uniqueId + " / expiration : " + _expiration);
readMessage(data, cur, size, type);
@ -240,7 +240,7 @@ public abstract class I2NPMessageImpl extends DataStructureImpl implements I2NPM
}

public int toByteArray(byte buffer[]) {
long start = _context.clock().now();
//long start = _context.clock().now();

int prefixLen = 1 // type
+ 4 // uniqueId

@ -160,7 +160,7 @@ public class TunnelDataMessage extends I2NPMessageImpl {
/** write the message body to the output array, starting at the given index */
protected int writeMessageBody(byte out[], int curIndex) throws I2NPMessageException {
if ( (_tunnelId <= 0) || (_data == null) )
throw new I2NPMessageException("Not enough data to write out (id=" + _tunnelId + " data=" + _data + ")");
throw new I2NPMessageException("Not enough data to write out (id=" + _tunnelId + ")");
if (_data.length <= 0)
throw new I2NPMessageException("Not enough data to write out (data.length=" + _data.length + ")");

@ -59,7 +59,7 @@ import net.i2p.util.Log;
*
*/
public class Blocklist {
private Log _log;
private final Log _log;
private RouterContext _context;
private long _blocklist[];
private int _blocklistSize;
@ -72,15 +72,11 @@ public class Blocklist {
public Blocklist(RouterContext context) {
_context = context;
_log = context.logManager().getLog(Blocklist.class);
_blocklist = null;
_blocklistSize = 0;
_wrapSave = null;
}

/** only for testing with main() */
public Blocklist() {
_log = new Log(Blocklist.class);
_blocklist = null;
_blocklistSize = 0;
}

static final String PROP_BLOCKLIST_ENABLED = "router.blocklist.enable";
@ -683,7 +679,7 @@ public class Blocklist {
return;
Job job = new ShitlistJob(peer);
if (number > 0)
job.getTiming().setStartAfter(_context.clock().now() + (number * 30*1000));
job.getTiming().setStartAfter(_context.clock().now() + (30*1000l * number));
_context.jobQueue().addJob(job);
}

@ -28,16 +28,10 @@ public class ClientMessage {
private Hash _destinationHash;
private MessageId _messageId;
private long _expiration;
/** only for outbound messages */
private int _flags;

public ClientMessage() {
setPayload(null);
setDestination(null);
setFromDestination(null);
setReceptionInfo(null);
setSenderConfig(null);
setDestinationHash(null);
setMessageId(null);
setExpiration(0);
}

/**
@ -101,4 +95,17 @@ public class ClientMessage {
*/
public long getExpiration() { return _expiration; }
public void setExpiration(long e) { _expiration = e; }

/**
* Flags requested by the client that sent the message. This will only be available
* for locally originated messages.
*
* @since 0.8.4
*/
public int getFlags() { return _flags; }

/**
* @since 0.8.4
*/
public void setFlags(int f) { _flags = f; }
}

@ -23,12 +23,13 @@ import net.i2p.util.Log;
*
*/
public class ClientMessagePool {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;

public ClientMessagePool(RouterContext context) {
_context = context;
_log = _context.logManager().getLog(ClientMessagePool.class);
OutboundClientMessageOneShotJob.init(_context);
}

/**
@ -65,6 +66,7 @@ public class ClientMessagePool {
}
}

/******
private boolean isGuaranteed(ClientMessage msg) {
Properties opts = null;
if (msg.getSenderConfig() != null)
@ -76,4 +78,5 @@ public class ClientMessagePool {
return false;
}
}
******/
}

@ -16,6 +16,7 @@ import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -36,6 +37,7 @@ class DummyNetworkDatabaseFacade extends NetworkDatabaseFacade {
_routers.put(info.getIdentity().getHash(), info);
}

public DatabaseEntry lookupLocally(Hash key) { return null; }
public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {}
public LeaseSet lookupLeaseSetLocally(Hash key) { return null; }
public void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {

@ -30,15 +30,17 @@ import net.i2p.util.Log;
*
*/
public class InNetMessagePool implements Service {
private Log _log;
private RouterContext _context;
private HandlerJobBuilder _handlerJobBuilders[];
private final Log _log;
private final RouterContext _context;
private final HandlerJobBuilder _handlerJobBuilders[];

/** following 5 unused unless DISPATCH_DIRECT == false */
private final List _pendingDataMessages;
private final List _pendingDataMessagesFrom;
private final List _pendingGatewayMessages;
private SharedShortCircuitDataJob _shortCircuitDataJob;
private SharedShortCircuitGatewayJob _shortCircuitGatewayJob;

private boolean _alive;
private boolean _dispatchThreaded;

@ -34,22 +34,22 @@ import net.i2p.util.Log;
*
*/
public class JobQueue {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;

/** Integer (runnerId) to JobQueueRunner for created runners */
private final Map<Integer, JobQueueRunner> _queueRunners;
/** a counter to identify a job runner */
private volatile static int _runnerId = 0;
/** list of jobs that are ready to run ASAP */
private BlockingQueue<Job> _readyJobs;
private final BlockingQueue<Job> _readyJobs;
/** list of jobs that are scheduled for running in the future */
private List<Job> _timedJobs;
private final List<Job> _timedJobs;
/** job name to JobStat for that job */
private final Map<String, JobStats> _jobStats;
/** how many job queue runners can go concurrently */
private int _maxRunners = 1;
private QueuePumper _pumper;
private final QueuePumper _pumper;
/** will we allow the # job runners to grow beyond 1? */
private boolean _allowParallelOperation;
/** have we been killed or are we alive? */
@ -208,7 +208,7 @@ public class JobQueue {
* <code>false</code> if the job is finished or doesn't exist in the queue.
*/
public boolean isJobActive(Job job) {
if (_readyJobs.contains(job) | _timedJobs.contains(job))
if (_readyJobs.contains(job) || _timedJobs.contains(job))
return true;
for (JobQueueRunner runner: _queueRunners.values())
if (runner.getCurrentJob() == job)
@ -689,7 +689,7 @@ public class JobQueue {
TreeMap<Long, Job> ordered = new TreeMap();
for (int i = 0; i < timedJobs.size(); i++) {
Job j = timedJobs.get(i);
ordered.put(new Long(j.getTiming().getStartAfter()), j);
ordered.put(Long.valueOf(j.getTiming().getStartAfter()), j);
}
for (Iterator<Job> iter = ordered.values().iterator(); iter.hasNext(); ) {
Job j = iter.next();

@ -4,10 +4,10 @@ import net.i2p.util.Log;

/** a do run run run a do run run */
class JobQueueRunner implements Runnable {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;
private boolean _keepRunning;
private int _id;
private final int _id;
private long _numJobs;
private Job _currentJob;
private Job _lastJob;
@ -19,9 +19,6 @@ class JobQueueRunner implements Runnable {
_context = context;
_id = id;
_keepRunning = true;
_numJobs = 0;
_currentJob = null;
_lastJob = null;
_log = _context.logManager().getLog(JobQueueRunner.class);
_context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunSlow", "How long jobs that take over a second take", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });

@ -4,7 +4,7 @@ import net.i2p.data.DataHelper;

/** glorified struct to contain basic job stats */
class JobStats {
private String _job;
private final String _job;
private volatile long _numRuns;
private volatile long _totalTime;
private volatile long _maxTime;

@ -18,7 +18,7 @@ public class JobTiming implements Clock.ClockUpdateListener {
private long _start;
private long _actualStart;
private long _actualEnd;
private RouterContext _context;
private final RouterContext _context;

public JobTiming(RouterContext context) {
_context = context;

@ -12,9 +12,7 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import net.i2p.data.DataFormatException;
@ -37,14 +35,14 @@ import net.i2p.util.SecureFileOutputStream;
*
*/
public class KeyManager {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;
private PrivateKey _privateKey;
private PublicKey _publicKey;
private SigningPrivateKey _signingPrivateKey;
private SigningPublicKey _signingPublicKey;
private final Map<Hash, LeaseSetKeys> _leaseSetKeys; // Destination --> LeaseSetKeys
private SynchronizeKeysJob _synchronizeJob;
private final SynchronizeKeysJob _synchronizeJob;

public final static String PROP_KEYDIR = "router.keyBackupDir";
public final static String DEFAULT_KEYDIR = "keyBackup";
@ -61,10 +59,6 @@ public class KeyManager {
_context = context;
_log = _context.logManager().getLog(KeyManager.class);
_synchronizeJob = new SynchronizeKeysJob();
setPrivateKey(null);
setPublicKey(null);
setSigningPrivateKey(null);
setSigningPublicKey(null);
_leaseSetKeys = new ConcurrentHashMap();
}

@ -132,12 +126,6 @@ public class KeyManager {
return _leaseSetKeys.get(dest);
}

public Set<LeaseSetKeys> getAllKeys() {
HashSet keys = new HashSet();
keys.addAll(_leaseSetKeys.values());
return keys;
}

private class SynchronizeKeysJob extends JobImpl {
public SynchronizeKeysJob() {
super(KeyManager.this._context);

@ -13,14 +13,13 @@ import net.i2p.util.Log;
*
*/
public class MessageValidator {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;
private DecayingBloomFilter _filter;

public MessageValidator(RouterContext context) {
_log = context.logManager().getLog(MessageValidator.class);
_filter = null;
_context = context;
context.statManager().createRateStat("router.duplicateMessageId", "Note that a duplicate messageId was received", "Router",
new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });

@ -90,13 +90,17 @@ public class MultiRouter {

private static Properties getEnv(String filename) {
Properties props = new Properties();
FileInputStream in = null;
try {
props.load(new FileInputStream(filename));
in = new FileInputStream(filename);
props.load(in);
props.setProperty("time.disabled", "true");
return props;
} catch (IOException ioe) {
ioe.printStackTrace();
return null;
} finally {
if (in != null) try { in.close(); } catch (IOException ioe) {}
}
}

@ -13,6 +13,7 @@ import java.io.Writer;
import java.util.Collections;
import java.util.Set;

import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -32,6 +33,11 @@ public abstract class NetworkDatabaseFacade implements Service {
*/
public abstract Set<Hash> findNearestRouters(Hash key, int maxNumRouters, Set<Hash> peersToIgnore);

/**
* @return RouterInfo, LeaseSet, or null
* @since 0.8.3
*/
public abstract DatabaseEntry lookupLocally(Hash key);
public abstract void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs);
public abstract LeaseSet lookupLeaseSetLocally(Hash key);
public abstract void lookupRouterInfo(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs);

@ -89,15 +89,18 @@ public class OutNetMessage {
// only timestamp if we are debugging
synchronized (this) {
locked_initTimestamps();
while (_timestamps.containsKey(eventName)) {
eventName = eventName + '.';
}
_timestamps.put(eventName, new Long(now));
// ???
//while (_timestamps.containsKey(eventName)) {
// eventName = eventName + '.';
//}
_timestamps.put(eventName, Long.valueOf(now));
_timestampOrder.add(eventName);
}
}
return now - _created;
}

/** @deprecated unused */
public Map<String, Long> getTimestamps() {
if (_log.shouldLog(Log.INFO)) {
synchronized (this) {
@ -107,6 +110,8 @@ public class OutNetMessage {
}
return Collections.EMPTY_MAP;
}

/** @deprecated unused */
public Long getTimestamp(String eventName) {
if (_log.shouldLog(Log.INFO)) {
synchronized (this) {
@ -220,7 +225,7 @@ public class OutNetMessage {

public void transportFailed(String transportStyle) {
if (_failedTransports == null)
_failedTransports = new HashSet(1);
_failedTransports = new HashSet(2);
_failedTransports.add(transportStyle);
}
/** not thread safe - dont fail transports and iterate over this at the same time */
@ -368,7 +373,7 @@ public class OutNetMessage {
@Override
public boolean equals(Object obj) {
if(obj == null) return false;
if(obj.getClass() != OutNetMessage.class) return false;
if(!(obj instanceof OutNetMessage)) return false;
return obj == this; // two OutNetMessages are different even if they contain the same message
}
}

@ -34,7 +34,7 @@ public class RouterClock extends Clock {
private long _lastChanged;
private int _lastStratum;

RouterContext _contextRC; // LINT field hides another field
private final RouterContext _contextRC;

public RouterClock(RouterContext context) {
super(context);

@ -9,13 +9,9 @@ import net.i2p.data.Hash;
import net.i2p.internal.InternalClientManager;
import net.i2p.router.client.ClientManagerFacadeImpl;
import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.router.peermanager.Calculator;
import net.i2p.router.peermanager.CapacityCalculator;
import net.i2p.router.peermanager.IntegrationCalculator;
import net.i2p.router.peermanager.PeerManagerFacadeImpl;
import net.i2p.router.peermanager.ProfileManagerImpl;
import net.i2p.router.peermanager.ProfileOrganizer;
import net.i2p.router.peermanager.SpeedCalculator;
import net.i2p.router.transport.CommSystemFacadeImpl;
import net.i2p.router.transport.FIFOBandwidthLimiter;
import net.i2p.router.transport.OutboundMessageRegistry;
@ -58,11 +54,6 @@ public class RouterContext extends I2PAppContext {
private MessageValidator _messageValidator;
private MessageStateMonitor _messageStateMonitor;
private RouterThrottle _throttle;
private RouterClock _clockX; // LINT field hides another field, hope rename won't break anything.
private Calculator _integrationCalc;
private Calculator _speedCalc;
private Calculator _capacityCalc;

private static List<RouterContext> _contexts = new ArrayList(1);

@ -166,9 +157,6 @@ public class RouterContext extends I2PAppContext {
_messageValidator = new MessageValidator(this);
_throttle = new RouterThrottleImpl(this);
//_throttle = new RouterDoSThrottle(this);
_integrationCalc = new IntegrationCalculator(this);
_speedCalc = new SpeedCalculator(this);
_capacityCalc = new CapacityCalculator(this);
}

/**
@ -290,13 +278,6 @@ public class RouterContext extends I2PAppContext {
*/
public RouterThrottle throttle() { return _throttle; }

/** how do we rank the integration of profiles? */
public Calculator integrationCalculator() { return _integrationCalc; }
/** how do we rank the speed of profiles? */
public Calculator speedCalculator() { return _speedCalc; }
/** how do we rank the capacity of profiles? */
public Calculator capacityCalculator() { return _capacityCalc; }

@Override
public String toString() {
StringBuilder buf = new StringBuilder(512);
@ -320,8 +301,6 @@ public class RouterContext extends I2PAppContext {
buf.append(_statPublisher).append('\n');
buf.append(_shitlist).append('\n');
buf.append(_messageValidator).append('\n');
buf.append(_integrationCalc).append('\n');
buf.append(_speedCalc).append('\n');
return buf.toString();
}

@ -371,23 +350,22 @@ public class RouterContext extends I2PAppContext {
}

/**
* The context's synchronized clock, which is kept context specific only to
* enable simulators to play with clock skew among different instances.
*
* It wouldn't be necessary to override clock(), except for the reason
* that it triggers initializeClock() of which we definitely
* need the local version to run.
* @return new Properties with system and context properties
* @since 0.8.4
*/
@Override
public Clock clock() {
if (!_clockInitialized) initializeClock();
return _clockX;
public Properties getProperties() {
Properties rv = super.getProperties();
if (_router != null)
rv.putAll(_router.getConfigMap());
return rv;
}

@Override
protected void initializeClock() {
synchronized (this) {
if (_clockX == null)
_clockX = new RouterClock(this);
if (_clock == null)
_clock = new RouterClock(this);
_clockInitialized = true;
}
}

@ -12,12 +12,12 @@ import net.i2p.util.Log;
*
*/
class RouterThrottleImpl implements RouterThrottle {
private RouterContext _context;
private Log _log;
private final RouterContext _context;
private final Log _log;
private String _tunnelStatus;

/**
* arbitrary hard limit of 10 seconds - if its taking this long to get
* arbitrary hard limit - if it's taking this long to get
* to a job, we're congested.
*
*/
@ -98,7 +98,7 @@ class RouterThrottleImpl implements RouterThrottle {
if (_context.router().getUptime() < 20*60*1000)
return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;

long lag = _context.jobQueue().getMaxLag();
//long lag = _context.jobQueue().getMaxLag();
// reject here if lag too high???

RateStat rs = _context.statManager().getRate("transport.sendProcessingTime");

@ -12,8 +12,8 @@ import net.i2p.util.Log;
*
*/
class RouterWatchdog implements Runnable {
private Log _log;
private RouterContext _context;
private final Log _log;
private final RouterContext _context;
private int _consecutiveErrors;

private static final long MAX_JOB_RUN_LAG = 60*1000;

@ -10,13 +10,13 @@ package net.i2p.router;

import java.io.IOException;
import java.io.Writer;
import java.util.concurrent.ConcurrentHashMap;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
@ -31,9 +31,9 @@ import net.i2p.util.Log;
* shitlist.
*/
public class Shitlist {
private Log _log;
private RouterContext _context;
private Map<Hash, Entry> _entries;
private final Log _log;
private final RouterContext _context;
private final Map<Hash, Entry> _entries;

public static class Entry {
/** when it should expire, per the i2p clock */
@ -156,7 +156,7 @@ public class Shitlist {
e.causeCode = reasonCode;
e.transports = null;
if (transport != null) {
e.transports = new ConcurrentHashSet(1);
e.transports = new ConcurrentHashSet(2);
e.transports.add(transport);
}

@ -157,9 +157,12 @@ public class StatisticsManager implements Service {
return stats;
}

/*****
private void includeRate(String rateName, Properties stats, long selectedPeriods[]) {
includeRate(rateName, stats, selectedPeriods, false);
}
*****/

/**
* @param fudgeQuantity the data being published in this stat is too sensitive to, uh
* publish, so we're kludge the quantity (allowing the fairly safe
@ -258,7 +261,6 @@ public class StatisticsManager implements Service {
// bah saturation
buf.append("0;0;0;0;");
}
long numPeriods = rate.getLifetimePeriods();
buf.append(num(fudgeQuantity)).append(';');
return buf.toString();
}

@ -280,8 +280,12 @@ class ClientConnectionRunner {
MessageId id = new MessageId();
id.setMessageId(getNextMessageId());
long expiration = 0;
if (message instanceof SendMessageExpiresMessage)
expiration = ((SendMessageExpiresMessage) message).getExpiration().getTime();
int flags = 0;
if (message.getType() == SendMessageExpiresMessage.MESSAGE_TYPE) {
SendMessageExpiresMessage msg = (SendMessageExpiresMessage) message;
expiration = msg.getExpirationTime();
flags = msg.getFlags();
}
if (!_dontSendMSM)
_acceptedPending.add(id);

@ -289,16 +293,17 @@ class ClientConnectionRunner {
_log.debug("** Receiving message [" + id.getMessageId() + "] with payload of size ["
+ payload.getSize() + "]" + " for session [" + _sessionId.getSessionId()
+ "]");
long beforeDistribute = _context.clock().now();
//long beforeDistribute = _context.clock().now();
// the following blocks as described above
SessionConfig cfg = _config;
if (cfg != null)
_manager.distributeMessage(cfg.getDestination(), dest, payload, id, expiration);
long timeToDistribute = _context.clock().now() - beforeDistribute;
if (_log.shouldLog(Log.DEBUG))
_log.warn("Time to distribute in the manager to "
+ dest.calculateHash().toBase64() + ": "
+ timeToDistribute);
_manager.distributeMessage(cfg.getDestination(), dest, payload, id, expiration, flags);
// else log error?
//long timeToDistribute = _context.clock().now() - beforeDistribute;
//if (_log.shouldLog(Log.DEBUG))
// _log.warn("Time to distribute in the manager to "
// + dest.calculateHash().toBase64() + ": "
// + timeToDistribute);
return id;
}

@ -193,7 +193,11 @@ class ClientManager {
}
}

void distributeMessage(Destination fromDest, Destination toDest, Payload payload, MessageId msgId, long expiration) {
/**
* Distribute message to a local or remote destination.
* @param flags ignored for local
*/
void distributeMessage(Destination fromDest, Destination toDest, Payload payload, MessageId msgId, long expiration, int flags) {
// check if there is a runner for it
ClientConnectionRunner runner = getRunner(toDest);
if (runner != null) {
@ -204,6 +208,7 @@ class ClientManager {
// sender went away
return;
}
// TODO can we just run this inline instead?
_ctx.jobQueue().addJob(new DistributeLocal(toDest, runner, sender, fromDest, payload, msgId));
} else {
// remote. w00t
@ -217,22 +222,22 @@ class ClientManager {
ClientMessage msg = new ClientMessage();
msg.setDestination(toDest);
msg.setPayload(payload);
msg.setReceptionInfo(null);
msg.setSenderConfig(runner.getConfig());
msg.setFromDestination(runner.getConfig().getDestination());
msg.setMessageId(msgId);
msg.setExpiration(expiration);
msg.setFlags(flags);
_ctx.clientMessagePool().add(msg, true);
}
}

private class DistributeLocal extends JobImpl {
private Destination _toDest;
private ClientConnectionRunner _to;
private ClientConnectionRunner _from;
private Destination _fromDest;
private Payload _payload;
private MessageId _msgId;
private final Destination _toDest;
private final ClientConnectionRunner _to;
private final ClientConnectionRunner _from;
private final Destination _fromDest;
private final Payload _payload;
private final MessageId _msgId;

public DistributeLocal(Destination toDest, ClientConnectionRunner to, ClientConnectionRunner from, Destination fromDest, Payload payload, MessageId id) {
super(_ctx);
@ -433,7 +438,9 @@ class ClientManager {
}
}

/** @deprecated unused */
public void renderStatusHTML(Writer out) throws IOException {
/******
StringBuilder buf = new StringBuilder(8*1024);
buf.append("<u><b>Local destinations</b></u><br>");

@ -479,6 +486,7 @@ class ClientManager {
buf.append("\n<hr>\n");
out.write(buf.toString());
out.flush();
******/
}

public void messageReceived(ClientMessage msg) {

@ -207,6 +207,7 @@ public class ClientManagerFacadeImpl extends ClientManagerFacade implements Inte
}
}

/** @deprecated unused */
@Override
public void renderStatusHTML(Writer out) throws IOException {
if (_manager != null)

@ -142,12 +142,12 @@ class SSLClientListenerRunner extends ClientListenerRunner {
private void exportCert(File ks) {
File sdir = new SecureDirectory(_context.getConfigDir(), "certificates");
if (sdir.exists() || sdir.mkdir()) {
InputStream fis = null;
try {
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
InputStream fis = new FileInputStream(ks);
fis = new FileInputStream(ks);
String ksPass = _context.getProperty(PROP_KEYSTORE_PASSWORD, DEFAULT_KEYSTORE_PASSWORD);
keyStore.load(fis, ksPass.toCharArray());
fis.close();
Certificate cert = keyStore.getCertificate(KEY_ALIAS);
if (cert != null) {
File certFile = new File(sdir, ASCII_KEYFILE);
@ -159,6 +159,8 @@ class SSLClientListenerRunner extends ClientListenerRunner {
_log.error("Error saving ASCII SSL keys", gse);
} catch (IOException ioe) {
_log.error("Error saving ASCII SSL keys", ioe);
} finally {
if (fis != null) try { fis.close(); } catch (IOException ioe) {}
}
} else {
_log.error("Error saving ASCII SSL keys");
@ -208,12 +210,12 @@ class SSLClientListenerRunner extends ClientListenerRunner {
" in " + (new File(_context.getConfigDir(), "router.config")).getAbsolutePath());
return false;
}
InputStream fis = null;
try {
SSLContext sslc = SSLContext.getInstance("TLS");
KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
InputStream fis = new FileInputStream(ks);
fis = new FileInputStream(ks);
keyStore.load(fis, ksPass.toCharArray());
fis.close();
KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
kmf.init(keyStore, keyPass.toCharArray());
sslc.init(kmf.getKeyManagers(), null, _context.random());
@ -223,6 +225,8 @@ class SSLClientListenerRunner extends ClientListenerRunner {
_log.error("Error loading SSL keys", gse);
} catch (IOException ioe) {
_log.error("Error loading SSL keys", ioe);
} finally {
if (fis != null) try { fis.close(); } catch (IOException ioe) {}
}
return false;
}

@ -18,15 +18,14 @@ import net.i2p.data.i2np.GarlicClove;
* Wrap up the data contained in a CloveMessage after being decrypted
*
*/
public class CloveSet {
private List _cloves;
class CloveSet {
private final List _cloves;
private Certificate _cert;
private long _msgId;
private long _expiration;

public CloveSet() {
_cloves = new ArrayList();
_cert = null;
_cloves = new ArrayList(4);
_msgId = -1;
_expiration = -1;
}

@ -21,13 +21,13 @@ import net.i2p.data.i2np.DeliveryInstructions;
* Define the contents of a garlic chunk that contains 1 or more sub garlics
*
*/
public class GarlicConfig {
class GarlicConfig {
private RouterInfo _recipient;
private PublicKey _recipientPublicKey;
private Certificate _cert;
private long _id;
private long _expiration;
private List _cloveConfigs;
private final List _cloveConfigs;
private DeliveryInstructions _instructions;
private boolean _requestAck;
private RouterInfo _replyThroughRouter; // router through which any replies will be sent before delivery to us
@ -39,7 +39,7 @@ public class GarlicConfig {
public GarlicConfig() {
_id = -1;
_expiration = -1;
_cloveConfigs = new ArrayList();
_cloveConfigs = new ArrayList(4);
_replyBlockMessageId = -1;
_replyBlockExpiration = -1;
}

@ -21,7 +21,7 @@ import net.i2p.router.RouterContext;
*
*/
public class GarlicMessageHandler implements HandlerJobBuilder {
private RouterContext _context;
private final RouterContext _context;

public GarlicMessageHandler(RouterContext context) {
_context = context;

@ -24,9 +24,9 @@ import net.i2p.util.Log;
* Read a GarlicMessage, decrypt it, and return the resulting CloveSet
*
*/
public class GarlicMessageParser {
private Log _log;
private RouterContext _context;
class GarlicMessageParser {
private final Log _log;
private final RouterContext _context;

public GarlicMessageParser(RouterContext context) {
_context = context;
@ -47,7 +47,7 @@ public class GarlicMessageParser {
}
if (decrData == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("Decryption of garlic message failed (data = " + encData + ")", new Exception("Decrypt fail"));
_log.warn("Decryption of garlic message failed", new Exception("Decrypt fail"));
return null;
} else {
try {

@ -26,11 +26,11 @@ import net.i2p.util.Log;
*
*/
public class GarlicMessageReceiver {
private RouterContext _context;
private Log _log;
private CloveReceiver _receiver;
private Hash _clientDestination;
private GarlicMessageParser _parser;
private final RouterContext _context;
private final Log _log;
private final CloveReceiver _receiver;
private final Hash _clientDestination;
private final GarlicMessageParser _parser;

private final static int FORWARD_PRIORITY = 50;

@ -28,9 +28,9 @@ import net.i2p.util.Log;
* need to be. soon)
*
*/
public class HandleGarlicMessageJob extends JobImpl implements GarlicMessageReceiver.CloveReceiver {
private Log _log;
private GarlicMessage _message;
class HandleGarlicMessageJob extends JobImpl implements GarlicMessageReceiver.CloveReceiver {
private final Log _log;
private final GarlicMessage _message;
//private RouterIdentity _from;
//private Hash _fromHash;
//private Map _cloves; // map of clove Id --> Expiration of cloves we've already seen

@ -227,9 +227,8 @@ class OutboundClientMessageJobHelper {
clove.setExpiration(expiration);
clove.setId(ctx.random().nextLong(I2NPMessage.MAX_ID_VALUE));
DatabaseStoreMessage msg = new DatabaseStoreMessage(ctx);
msg.setLeaseSet(replyLeaseSet);
msg.setEntry(replyLeaseSet);
msg.setMessageExpiration(expiration);
msg.setKey(replyLeaseSet.getDestination().calculateHash());
clove.setPayload(msg);
clove.setRecipientPublicKey(null);
clove.setRequestAck(false);

@ -47,21 +47,21 @@ import net.i2p.util.SimpleTimer;
*
*/
public class OutboundClientMessageOneShotJob extends JobImpl {
private Log _log;
private final Log _log;
private long _overallExpiration;
private ClientMessage _clientMessage;
private MessageId _clientMessageId;
private int _clientMessageSize;
private Destination _from;
private Destination _to;
private String _toString;
private final MessageId _clientMessageId;
private final int _clientMessageSize;
private final Destination _from;
private final Destination _to;
private final String _toString;
/** target destination's leaseSet, if known */
private LeaseSet _leaseSet;
/** Actual lease the message is being routed through */
private Lease _lease;
private PayloadGarlicConfig _clove;
private long _cloveId;
private long _start;
private final long _start;
private boolean _finished;
private long _leaseSetLookupBegin;
private TunnelInfo _outTunnel;
@ -103,8 +103,6 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
*/
private static final int BUNDLE_PROBABILITY_DEFAULT = 100;

private static final Object _initializeLock = new Object();
private static boolean _initialized = false;
private static final int CLEAN_INTERVAL = 5*60*1000;
private static final int REPLY_REQUEST_INTERVAL = 60*1000;

@ -115,26 +113,6 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
super(ctx);
_log = ctx.logManager().getLog(OutboundClientMessageOneShotJob.class);

synchronized (_initializeLock) {
if (!_initialized) {
SimpleScheduler.getInstance().addPeriodicEvent(new OCMOSJCacheCleaner(ctx), CLEAN_INTERVAL, CLEAN_INTERVAL);
ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAckTime", "Message round trip time", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundLocally", "How often we tried to look for a leaseSet and found it locally?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look for a remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFailedRemoteTime", "How long we tried to look for a remote leaseSet (when we failed)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchPrepareTime", "How long until we've queued up the dispatch job (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchTime", "How long until we've dispatched the message (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchSendTime", "How long the actual dispatching takes?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchNoTunnels", "How long after start do we run out of tunnels to send/receive with?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchNoACK", "Repeated message sends to a peer (no ack required)", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l });
_initialized = true;
}
}
long timeoutMs = OVERALL_TIMEOUT_MS_DEFAULT;
_clientMessage = msg;
_clientMessageId = msg.getMessageId();
@ -149,10 +127,17 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
// otherwise router config, otherwise default
_overallExpiration = msg.getExpiration();
if (_overallExpiration > 0) {
_overallExpiration = Math.max(_overallExpiration, _start + OVERALL_TIMEOUT_MS_MIN);
_overallExpiration = Math.min(_overallExpiration, _start + OVERALL_TIMEOUT_MS_DEFAULT);
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Message Expiration (ms): " + (_overallExpiration - _start));
// Unless it's already expired, set a min and max expiration
if (_overallExpiration <= _start) {
_overallExpiration = Math.max(_overallExpiration, _start + OVERALL_TIMEOUT_MS_MIN);
_overallExpiration = Math.min(_overallExpiration, _start + OVERALL_TIMEOUT_MS_DEFAULT);
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Message Expiration (ms): " + (_overallExpiration - _start));
} else {
if (_log.shouldLog(Log.WARN))
_log.warn(getJobId() + ": Expired before we got to it");
// runJob() will call dieFatal()
}
} else {
String param = msg.getSenderConfig().getOptions().getProperty(OVERALL_TIMEOUT_MS_PARAM);
if (param == null)
@ -171,15 +156,38 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + " Default Expiration (ms): " + timeoutMs);
}
_finished = false;
}

/** call once only */
public static void init(RouterContext ctx) {
SimpleScheduler.getInstance().addPeriodicEvent(new OCMOSJCacheCleaner(ctx), CLEAN_INTERVAL, CLEAN_INTERVAL);
ctx.statManager().createFrequencyStat("client.sendMessageFailFrequency", "How often does a client fail to send a message?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendMessageSize", "How large are messages sent by the client?", "ClientMessages", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.sendAckTime", "Message round trip time", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionTunnel", "How lagged our tunnels are when a send times out?", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionMessage", "How fast we process messages locally when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.timeoutCongestionInbound", "How much faster we are receiving data than our average bps when a send times out?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundLocally", "How often we tried to look for a leaseSet and found it locally?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFoundRemoteTime", "How long we tried to look for a remote leaseSet (when we succeeded)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.leaseSetFailedRemoteTime", "How long we tried to look for a remote leaseSet (when we failed)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchPrepareTime", "How long until we've queued up the dispatch job (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchTime", "How long until we've dispatched the message (since we started)?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchSendTime", "How long the actual dispatching takes?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchNoTunnels", "How long after start do we run out of tunnels to send/receive with?", "ClientMessages", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
ctx.statManager().createRateStat("client.dispatchNoACK", "Repeated message sends to a peer (no ack required)", "ClientMessages", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l });
}

public String getName() { return "Outbound client message"; }

public void runJob() {
long now = getContext().clock().now();
if (now >= _overallExpiration) {
dieFatal();
return;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Send outbound client message job beginning");
long timeoutMs = _overallExpiration - getContext().clock().now();
long timeoutMs = _overallExpiration - now;
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": preparing to search for the leaseSet for " + _toString);
Hash key = _to.calculateHash();
@ -249,7 +257,6 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
}

// If the last leaseSet we sent him is still good, don't bother sending again
long now = getContext().clock().now();
synchronized (_leaseSetCache) {
if (!force) {
LeaseSet ls = _leaseSetCache.get(hashPair());
@ -326,7 +333,6 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
_log.warn(getJobId() + ": Lookup locally didn't find the leaseSet for " + _toString);
return false;
}
long now = getContext().clock().now();

// Use the same lease if it's still good
// Even if _leaseSet changed, _leaseSet.getEncryptionKey() didn't...
@ -373,7 +379,7 @@ public class OutboundClientMessageOneShotJob extends JobImpl {

// randomize the ordering (so leases with equal # of failures per next
// sort are randomly ordered)
Collections.shuffle(leases);
Collections.shuffle(leases, getContext().random());

/****
if (false) {
@ -793,7 +799,6 @@ public class OutboundClientMessageOneShotJob extends JobImpl {

private TunnelInfo selectOutboundTunnel(Destination to) {
TunnelInfo tunnel;
long now = getContext().clock().now();
synchronized (_tunnelCache) {
/**
* If old tunnel is valid and no longer backlogged, use it.

@ -19,7 +19,6 @@ public class PayloadGarlicConfig extends GarlicConfig {

public PayloadGarlicConfig() {
super();
_payload = null;
}

/**

@ -22,16 +22,16 @@ import net.i2p.router.RouterContext;
import net.i2p.util.Log;

public class SendMessageDirectJob extends JobImpl {
private Log _log;
private I2NPMessage _message;
private Hash _targetHash;
private final Log _log;
private final I2NPMessage _message;
private final Hash _targetHash;
private RouterInfo _router;
private long _expiration;
private int _priority;
private Job _onSend;
private ReplyJob _onSuccess;
private Job _onFail;
private MessageSelector _selector;
private final long _expiration;
private final int _priority;
private final Job _onSend;
private final ReplyJob _onSuccess;
private final Job _onFail;
private final MessageSelector _selector;
private boolean _alreadySearched;
private boolean _sent;
private long _searchOn;
@ -47,7 +47,6 @@ public class SendMessageDirectJob extends JobImpl {
_log = getContext().logManager().getLog(SendMessageDirectJob.class);
_message = message;
_targetHash = toPeer;
_router = null;
if (timeoutMs < 10*1000) {
if (_log.shouldLog(Log.WARN))
_log.warn("Very little time given [" + timeoutMs + "], resetting to 5s", new Exception("stingy bastard"));
@ -56,8 +55,6 @@ public class SendMessageDirectJob extends JobImpl {
_expiration = timeoutMs + ctx.clock().now();
}
_priority = priority;
_searchOn = 0;
_alreadySearched = false;
_onSend = onSend;
_onSuccess = onSuccess;
_onFail = onFail;
@ -66,7 +63,6 @@ public class SendMessageDirectJob extends JobImpl {
throw new IllegalArgumentException("Attempt to send a null message");
if (_targetHash == null)
throw new IllegalArgumentException("Attempt to send a message to a null peer");
_sent = false;
}

public String getName() { return "Send Message Direct"; }
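The hunk above marks the job's collaborators and timing fields final: they are assigned once in the constructor and never reassigned, so final both documents that invariant and gives the usual Java Memory Model guarantee that the values are visible to any thread that later sees the job. A generic sketch of the idiom, with illustrative names rather than the real I2P classes:

    // Illustrative only: final fields set in the constructor are safely published
    // to other threads along with the object reference.
    class DirectSendTask {
        private final String name;        // immutable after construction
        private final long expiration;    // computed once, never reassigned
        private long searchStartedOn;     // still mutable state, guarded elsewhere

        DirectSendTask(String name, long timeoutMs, long now) {
            this.name = name;
            this.expiration = now + timeoutMs;
        }

        long getExpiration() { return expiration; }
        String getName() { return name; }
    }
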

@ -12,7 +12,7 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

import net.i2p.data.DataStructure;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterIdentity;
@ -227,20 +227,19 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
return routerHashSet.contains(getContext().routerHash());
}

private void sendData(Hash key, DataStructure data, Hash toPeer, TunnelId replyTunnel) {
private void sendData(Hash key, DatabaseEntry data, Hash toPeer, TunnelId replyTunnel) {
if (!key.equals(data.getHash())) {
_log.error("Hash mismatch HDLMJ");
return;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending data matching key " + key.toBase64() + " to peer " + toPeer.toBase64()
+ " tunnel " + replyTunnel);
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(key);
if (data instanceof LeaseSet) {
msg.setLeaseSet((LeaseSet)data);
msg.setValueType(DatabaseStoreMessage.KEY_TYPE_LEASESET);
if (data.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
getContext().statManager().addRateData("netDb.lookupsMatchedLeaseSet", 1, 0);
} else if (data instanceof RouterInfo) {
msg.setRouterInfo((RouterInfo)data);
msg.setValueType(DatabaseStoreMessage.KEY_TYPE_ROUTERINFO);
}
msg.setEntry(data);
getContext().statManager().addRateData("netDb.lookupsMatched", 1, 0);
getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0);
sendMessage(msg, toPeer, replyTunnel);
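The sendData() change above drops the instanceof branches and the two type-specific setters (setLeaseSet()/setRouterInfo() plus setValueType()) in favour of a single setEntry() call; only the stat counter still needs to know which kind of entry it is, and it gets that from the entry's own type code. A minimal, self-contained sketch of that dispatch pattern, using stand-in classes rather than the real I2P types:

    // Stand-ins for DatabaseEntry and its two concrete kinds; illustrative only.
    abstract class Entry {
        static final int KEY_TYPE_ROUTERINFO = 0;
        static final int KEY_TYPE_LEASESET = 1;
        abstract int getType();
    }

    class LeaseSetEntry extends Entry {
        int getType() { return KEY_TYPE_LEASESET; }
    }

    class RouterInfoEntry extends Entry {
        int getType() { return KEY_TYPE_ROUTERINFO; }
    }

    class StoreMessage {
        private Entry entry;
        // One setter covers both kinds; the type code travels with the entry itself.
        void setEntry(Entry e) { entry = e; }
        Entry getEntry() { return entry; }
    }

    class SendDataSketch {
        static StoreMessage build(Entry data) {
            StoreMessage msg = new StoreMessage();
            msg.setEntry(data);
            if (data.getType() == Entry.KEY_TYPE_LEASESET) {
                // only type-specific bookkeeping (e.g. a per-type stat) branches here
            }
            return msg;
        }
    }
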

@ -10,9 +10,11 @@ package net.i2p.router.networkdb;

import java.util.Date;

import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterIdentity;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.router.JobImpl;
@ -59,16 +61,17 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {

String invalidMessage = null;
boolean wasNew = false;
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
DatabaseEntry entry = _message.getEntry();
if (entry.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
getContext().statManager().addRateData("netDb.storeLeaseSetHandled", 1, 0);

try {
LeaseSet ls = _message.getLeaseSet();
LeaseSet ls = (LeaseSet) entry;
// mark it as something we received, so we'll answer queries
// for it. this flag does NOT get set on entries that we
// receive in response to our own lookups.
ls.setReceivedAsPublished(true);
LeaseSet match = getContext().netDb().store(_message.getKey(), _message.getLeaseSet());
LeaseSet match = getContext().netDb().store(_message.getKey(), ls);
if (match == null) {
wasNew = true;
} else {
@ -78,13 +81,14 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
} catch (IllegalArgumentException iae) {
invalidMessage = iae.getMessage();
}
} else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
} else if (entry.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO) {
RouterInfo ri = (RouterInfo) entry;
getContext().statManager().addRateData("netDb.storeRouterInfoHandled", 1, 0);
if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of router " + _message.getKey() + " with publishDate of "
+ new Date(_message.getRouterInfo().getPublished()));
+ new Date(ri.getPublished()));
try {
Object match = getContext().netDb().store(_message.getKey(), _message.getRouterInfo());
Object match = getContext().netDb().store(_message.getKey(), ri);
wasNew = (null == match);
getContext().profileManager().heardAbout(_message.getKey());
} catch (IllegalArgumentException iae) {
@ -92,7 +96,7 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
}
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType()
_log.error("Invalid DatabaseStoreMessage data type - " + entry.getType()
+ ": " + _message);
}

@ -8,21 +8,27 @@ package net.i2p.router.networkdb.kademlia;
*
*/

import java.util.Collection;
import java.util.Map;
import java.util.Set;

import net.i2p.data.DataStructure;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;

public interface DataStore {
public boolean isInitialized();
public boolean isKnown(Hash key);
public DataStructure get(Hash key);
public DataStructure get(Hash key, boolean persist);
public boolean put(Hash key, DataStructure data);
public boolean put(Hash key, DataStructure data, boolean persist);
public DataStructure remove(Hash key);
public DataStructure remove(Hash key, boolean persist);
public DatabaseEntry get(Hash key);
public DatabaseEntry get(Hash key, boolean persist);
public boolean put(Hash key, DatabaseEntry data);
public boolean put(Hash key, DatabaseEntry data, boolean persist);
public DatabaseEntry remove(Hash key);
public DatabaseEntry remove(Hash key, boolean persist);
public Set<Hash> getKeys();
/** @since 0.8.3 */
public Collection<DatabaseEntry> getEntries();
/** @since 0.8.3 */
public Set<Map.Entry<Hash, DatabaseEntry>> getMapEntries();
public void stop();
public void restart();
public void rescan();
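With the interface above, everything in the store is a DatabaseEntry, so a single map can back get/put/remove, and the new getEntries()/getMapEntries() views come straight from that map. A rough in-memory sketch of how an implementation could satisfy the reshaped interface (an illustration only, not the real persistent or transient data stores, and with generic key/entry stand-ins in place of Hash and DatabaseEntry):

    import java.util.Collection;
    import java.util.Map;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    class MemoryStoreSketch<K, E> {
        private final Map<K, E> data = new ConcurrentHashMap<K, E>();

        E get(K key) { return data.get(key); }
        // returns true when the key was not present before, a simplification of the real contract
        boolean put(K key, E entry) { return data.put(key, entry) == null; }
        E remove(K key) { return data.remove(key); }
        Set<K> getKeys() { return data.keySet(); }
        Collection<E> getEntries() { return data.values(); }              // view added in 0.8.3
        Set<Map.Entry<K, E>> getMapEntries() { return data.entrySet(); }  // view added in 0.8.3
    }
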

@ -12,6 +12,7 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.router.JobImpl;
@ -61,8 +62,8 @@ class ExpireLeasesJob extends JobImpl {
Set toExpire = new HashSet(128);
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
Object obj = _facade.getDataStore().get(key);
if (obj instanceof LeaseSet) {
DatabaseEntry obj = _facade.getDataStore().get(key);
if (obj.getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
LeaseSet ls = (LeaseSet)obj;
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR))
toExpire.add(key);

@ -1,5 +1,8 @@
package net.i2p.router.networkdb.kademlia;

import net.i2p.data.DatabaseEntry;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.I2NPMessage;
@ -61,14 +64,15 @@ class FloodOnlyLookupMatchJob extends JobImpl implements ReplyJob {
// We do it here first to make sure it is in the DB before
// runJob() and search.success() is called???
// Should we just pass the DataStructure directly back to somebody?
if (dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET) {
// Since HFDSMJ wants to setReceivedAsPublished(), we have to
// set a flag saying this was really the result of a query,
// so don't do that.
dsm.getLeaseSet().setReceivedAsReply();
getContext().netDb().store(dsm.getKey(), dsm.getLeaseSet());
LeaseSet ls = (LeaseSet) dsm.getEntry();
ls.setReceivedAsReply();
getContext().netDb().store(dsm.getKey(), ls);
} else {
getContext().netDb().store(dsm.getKey(), dsm.getRouterInfo());
getContext().netDb().store(dsm.getKey(), (RouterInfo) dsm.getEntry());
}
} catch (IllegalArgumentException iae) {
if (_log.shouldLog(Log.WARN))

@ -182,8 +182,7 @@ public class FloodSearchJob extends JobImpl {
_search = job;
}
public void runJob() {
if ( (getContext().netDb().lookupLeaseSetLocally(_search.getKey()) != null) ||
(getContext().netDb().lookupRouterInfoLocally(_search.getKey()) != null) ) {
if (getContext().netDb().lookupLocally(_search.getKey()) != null) {
_search.success();
} else {
int remaining = _search.getLookupsRemaining();
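The runJob() change above folds the two separate local lookups into one lookupLocally() call. A plausible shape for such a combined lookup, sketched with stand-in types and maps (the actual facade method may be implemented differently):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Stand-in store with separate leaseSet and routerInfo tables; illustrative only.
    class LocalLookupSketch {
        private final Map<String, Object> leaseSets = new ConcurrentHashMap<String, Object>();
        private final Map<String, Object> routerInfos = new ConcurrentHashMap<String, Object>();

        // Try the leaseSet table first, then fall back to routerInfos; callers that
        // only need to know "is anything stored locally?" now make a single call.
        Object lookupLocally(String key) {
            Object rv = leaseSets.get(key);
            if (rv == null)
                rv = routerInfos.get(key);
            return rv;
        }
    }
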

@ -7,8 +7,8 @@ import java.util.List;
import java.util.Map;
import java.util.Set;

import net.i2p.data.DatabaseEntry;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataStructure;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -93,11 +93,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
}

@Override
public void sendStore(Hash key, DataStructure ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
public void sendStore(Hash key, DatabaseEntry ds, Job onSuccess, Job onFailure, long sendTimeout, Set toIgnore) {
// if we are a part of the floodfill netDb, don't send out our own leaseSets as part
// of the flooding - instead, send them to a random floodfill peer so *they* can flood 'em out.
// perhaps statistically adjust this so we are the source every 1/N times... or something.
if (floodfillEnabled() && (ds instanceof RouterInfo)) {
if (floodfillEnabled() && (ds.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)) {
flood(ds);
if (onSuccess != null)
_context.jobQueue().addJob(onSuccess);
@ -129,12 +129,8 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
* We do this to implement Kademlia within the floodfills, i.e.
* we flood to those closest to the key.
*/
public void flood(DataStructure ds) {
Hash key;
if (ds instanceof LeaseSet)
key = ((LeaseSet)ds).getDestination().calculateHash();
else
key = ((RouterInfo)ds).getIdentity().calculateHash();
public void flood(DatabaseEntry ds) {
Hash key = ds.getHash();
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector();
List peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
@ -151,12 +147,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
if (peer.equals(_context.routerHash()))
continue;
DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
if (ds instanceof LeaseSet) {
msg.setLeaseSet((LeaseSet)ds);
} else {
msg.setRouterInfo((RouterInfo)ds);
}
msg.setKey(key);
msg.setEntry(ds);
msg.setReplyGateway(null);
msg.setReplyToken(0);
msg.setReplyTunnel(null);
@ -242,13 +233,9 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
List<RouterInfo> rv = new ArrayList();
DataStore ds = getDataStore();
if (ds != null) {
Set keys = ds.getKeys();
if (keys != null) {
for (Iterator iter = keys.iterator(); iter.hasNext(); ) {
Object o = ds.get((Hash)iter.next());
if (o instanceof RouterInfo)
rv.add((RouterInfo)o);
}
for (DatabaseEntry o : ds.getEntries()) {
if (o.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO)
rv.add((RouterInfo)o);
}
}
return rv;
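The flood() javadoc above says the entry is flooded to the floodfills closest to the routing key, which is the Kademlia notion of closeness: smallest XOR distance between a peer's hash and the key. A toy illustration of picking the closest peers by XOR distance, with byte arrays standing in for the real Hash type and no claim about how the real peer selector orders candidates:

    import java.math.BigInteger;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    class ClosestPeersSketch {
        // XOR distance between two equal-length ids, read as an unsigned integer.
        static BigInteger xorDistance(byte[] a, byte[] b) {
            byte[] d = new byte[a.length];
            for (int i = 0; i < a.length; i++)
                d[i] = (byte) (a[i] ^ b[i]);
            return new BigInteger(1, d);
        }

        // Keep the 'max' peers whose ids are closest to the routing key.
        static List<byte[]> closest(final byte[] routingKey, List<byte[]> peers, int max) {
            List<byte[]> sorted = new ArrayList<byte[]>(peers);
            Collections.sort(sorted, new Comparator<byte[]>() {
                public int compare(byte[] x, byte[] y) {
                    return xorDistance(x, routingKey).compareTo(xorDistance(y, routingKey));
                }
            });
            return sorted.subList(0, Math.min(max, sorted.size()));
        }
    }
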

@ -11,6 +11,7 @@ package net.i2p.router.networkdb.kademlia;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
@ -19,6 +20,7 @@ import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.router.RouterContext;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.router.util.RandomIterator;
import net.i2p.stat.Rate;
import net.i2p.util.Log;

@ -72,8 +74,10 @@ class FloodfillPeerSelector extends PeerSelector {
*/
List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets, boolean preferConnected) {
if (peersToIgnore == null)
peersToIgnore = new HashSet(1);
peersToIgnore.add(_context.routerHash());
peersToIgnore = Collections.singleton(_context.routerHash());
else
peersToIgnore.add(_context.routerHash());
// TODO this is very slow
FloodfillSelectionCollector matches = new FloodfillSelectionCollector(key, peersToIgnore, maxNumRouters);
if (kbuckets == null) return new ArrayList();
kbuckets.getAll(matches);
@ -91,8 +95,7 @@ class FloodfillPeerSelector extends PeerSelector {
* List is not sorted and not shuffled.
*/
List<Hash> selectFloodfillParticipants(KBucketSet kbuckets) {
Set<Hash> ignore = new HashSet(1);
ignore.add(_context.routerHash());
Set<Hash> ignore = Collections.singleton(_context.routerHash());
return selectFloodfillParticipants(ignore, kbuckets);
}

@ -104,6 +107,8 @@ class FloodfillPeerSelector extends PeerSelector {
*/
private List<Hash> selectFloodfillParticipants(Set<Hash> toIgnore, KBucketSet kbuckets) {
if (kbuckets == null) return Collections.EMPTY_LIST;
// TODO this is very slow - use profile getPeersByCapability('f') instead
_context.statManager().addRateData("netDb.newFSC", 0, 0);
FloodfillSelectionCollector matches = new FloodfillSelectionCollector(null, toIgnore, 0);
kbuckets.getAll(matches);
return matches.getFloodfillParticipants();
@ -127,8 +132,7 @@ class FloodfillPeerSelector extends PeerSelector {
* Group 3: All others
*/
List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet kbuckets) {
Set<Hash> ignore = new HashSet(1);
ignore.add(_context.routerHash());
Set<Hash> ignore = Collections.singleton(_context.routerHash());
return selectFloodfillParticipants(key, maxNumRouters, ignore, kbuckets);
}

@ -147,8 +151,7 @@ class FloodfillPeerSelector extends PeerSelector {
*/
List<Hash> selectFloodfillParticipants(Hash key, int howMany, Set<Hash> toIgnore, KBucketSet kbuckets) {
if (toIgnore == null) {
toIgnore = new HashSet(1);
toIgnore.add(_context.routerHash());
toIgnore = Collections.singleton(_context.routerHash());
} else if (!toIgnore.contains(_context.routerHash())) {
// copy the Set so we don't confuse StoreJob
toIgnore = new HashSet(toIgnore);
@ -320,7 +323,6 @@ class FloodfillPeerSelector extends PeerSelector {
* Group 4: Non-floodfills, sorted by closest-to-the-key
*/
public List<Hash> get(int howMany, boolean preferConnected) {
Collections.shuffle(_floodfillMatches, _context.random());
List<Hash> rv = new ArrayList(howMany);
List<Hash> badff = new ArrayList(howMany);
List<Hash> unconnectedff = new ArrayList(howMany);
@ -329,8 +331,8 @@ class FloodfillPeerSelector extends PeerSelector {
// Only add in "good" floodfills here...
// Let's say published in last 3h and no failed sends in last 30m
// (Forever shitlisted ones are excluded in add() above)
for (int i = 0; found < howMany && i < _floodfillMatches.size(); i++) {
Hash entry = (Hash) _floodfillMatches.get(i);
for (Iterator<Hash> iter = new RandomIterator(_floodfillMatches); (found < howMany) && iter.hasNext(); ) {
Hash entry = iter.next();
RouterInfo info = _context.netDb().lookupRouterInfoLocally(entry);
if (info != null && now - info.getPublished() > 3*60*60*1000) {
badff.add(entry);
@ -391,6 +393,7 @@ class FloodfillPeerSelector extends PeerSelector {
if (peersToIgnore != null && peersToIgnore.contains(Hash.FAKE_HASH)) {
// return non-ff
peersToIgnore.addAll(selectFloodfillParticipants(peersToIgnore, kbuckets));
// TODO this is very slow
FloodfillSelectionCollector matches = new FloodfillSelectionCollector(rkey, peersToIgnore, maxNumRouters);
kbuckets.getAll(matches);
return matches.get(maxNumRouters);
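Several of the hunks above swap a one-element HashSet for Collections.singleton(). The trade-off worth noting: singleton() avoids allocating a hash table, but the returned set is immutable, which is exactly why the branches that may still need to grow a caller-supplied set keep using add() or copy into a fresh HashSet first. A small demonstration of that constraint:

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    class SingletonSetDemo {
        public static void main(String[] args) {
            Set<String> ignore = Collections.singleton("our-router-hash");
            System.out.println(ignore.contains("our-router-hash")); // true

            try {
                ignore.add("another-hash"); // singleton sets are immutable
            } catch (UnsupportedOperationException expected) {
                System.out.println("cannot modify a singleton set");
            }

            // To extend the ignore list, copy it into a mutable set first.
            Set<String> mutable = new HashSet<String>(ignore);
            mutable.add("another-hash");
            System.out.println(mutable.size()); // 2
        }
    }
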

@ -12,7 +12,7 @@ import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Set;

import net.i2p.data.DataStructure;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.RouterInfo;
@ -30,7 +30,7 @@ class FloodfillStoreJob extends StoreJob {
* Send a data structure to the floodfills
*
*/
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) {
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DatabaseEntry data, Job onSuccess, Job onFailure, long timeoutMs) {
this(context, facade, key, data, onSuccess, onFailure, timeoutMs, null);
}

@ -38,7 +38,7 @@ class FloodfillStoreJob extends StoreJob {
* @param toSkip set of peer hashes of people we dont want to send the data to (e.g. we
* already know they have it). This can be null.
*/
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DatabaseEntry data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
super(context, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip);
_facade = facade;
}
@ -63,15 +63,12 @@ class FloodfillStoreJob extends StoreJob {
}
// Get the time stamp from the data we sent, so the Verify job can meke sure that
// it finds something stamped with that time or newer.
long published = 0;
DataStructure data = _state.getData();
boolean isRouterInfo = data instanceof RouterInfo;
DatabaseEntry data = _state.getData();
boolean isRouterInfo = data.getType() == DatabaseEntry.KEY_TYPE_ROUTERINFO;
long published = data.getDate();
if (isRouterInfo) {
published = ((RouterInfo) data).getPublished();
// Temporarily disable
return;
} else if (data instanceof LeaseSet) {
published = ((LeaseSet) data).getEarliestLeaseDate();
}
// we should always have exactly one successful entry
Hash sentTo = null;
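In the hunk above, data.getDate() replaces the per-type branches that pulled getPublished() from a RouterInfo or getEarliestLeaseDate() from a LeaseSet, so the common superclass is presumably expected to expose whichever timestamp is meaningful for the concrete entry. A stand-in sketch of that idea (not the real DatabaseEntry hierarchy):

    // Illustrative stand-ins: one "date" accessor, two type-specific meanings.
    abstract class TimestampedEntry {
        abstract long getDate();
    }

    class RouterInfoLike extends TimestampedEntry {
        private final long published;
        RouterInfoLike(long published) { this.published = published; }
        long getDate() { return published; }          // when the router info was published
    }

    class LeaseSetLike extends TimestampedEntry {
        private final long earliestLeaseDate;
        LeaseSetLike(long earliestLeaseDate) { this.earliestLeaseDate = earliestLeaseDate; }
        long getDate() { return earliestLeaseDate; }  // when the first lease expires
    }

    class VerifySketch {
        // The verify check can compare dates without caring about the concrete type.
        static boolean isAtLeastAsRecent(TimestampedEntry received, long published) {
            return received.getDate() >= published;
        }
    }
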

@ -4,7 +4,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;

import net.i2p.data.DataStructure;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.data.i2np.DatabaseLookupMessage;
@ -201,10 +201,7 @@ public class FloodfillVerifyStoreJob extends JobImpl {
// Verify it's as recent as the one we sent
boolean success = false;
DatabaseStoreMessage dsm = (DatabaseStoreMessage)_message;
if (_isRouterInfo && dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO)
success = dsm.getRouterInfo().getPublished() >= _published;
else if ((!_isRouterInfo) && dsm.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET)
success = dsm.getLeaseSet().getEarliestLeaseDate() >= _published;
success = dsm.getEntry().getDate() >= _published;
if (success) {
// store ok, w00t!
getContext().profileManager().dbLookupSuccessful(_target, delay);
@ -218,7 +215,7 @@ public class FloodfillVerifyStoreJob extends JobImpl {
if (_log.shouldLog(Log.WARN))
_log.warn("Verify failed (older) for " + _key);
if (_log.shouldLog(Log.INFO))
_log.info("Rcvd older lease: " + dsm.getLeaseSet());
_log.info("Rcvd older lease: " + dsm.getEntry());
} else if (_message instanceof DatabaseSearchReplyMessage) {
// assume 0 old, all new, 0 invalid, 0 dup
getContext().profileManager().dbLookupReply(_target, 0,
@ -245,11 +242,7 @@ public class FloodfillVerifyStoreJob extends JobImpl {
* So at least we'll try THREE ffs round-robin if things continue to fail...
*/
private void resend() {
DataStructure ds;
if (_isRouterInfo)
ds = _facade.lookupRouterInfoLocally(_key);
else
ds = _facade.lookupLeaseSetLocally(_key);
DatabaseEntry ds = _facade.lookupLocally(_key);
if (ds != null) {
Set<Hash> toSkip = new HashSet(2);
if (_sentTo != null)

@ -57,9 +57,7 @@ public class HandleFloodfillDatabaseLookupMessageJob extends HandleDatabaseLooku
// that would increment the netDb.lookupsHandled and netDb.lookupsMatched stats
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
RouterInfo me = getContext().router().getRouterInfo();
msg.setKey(me.getIdentity().getHash());
msg.setRouterInfo(me);
msg.setValueType(DatabaseStoreMessage.KEY_TYPE_ROUTERINFO);
msg.setEntry(me);
sendMessage(msg, toPeer, replyTunnel);
}
}