propagate from branch 'i2p.i2p.zzz.test' (head c295ab421dd719cfe0e273268b5b4e48505e4f61)

to branch 'i2p.i2p' (head 995914d8e049d9bb695fd25e4cf5be860cd4e487)
zzz
2010-03-18 15:49:03 +00:00
69 changed files with 1457 additions and 1213 deletions

View File

@ -163,10 +163,22 @@ class PeerConnectionOut implements Runnable
removeMessage(Message.PIECE);
// XXX - Should also register overhead...
if (m.type == Message.PIECE)
state.uploaded(m.len);
// Don't let other clients requesting big chunks get an advantage
// when we are seeding;
// only count the rest of the upload after sendMessage().
int remainder = 0;
if (m.type == Message.PIECE) {
if (m.len <= PeerState.PARTSIZE) {
state.uploaded(m.len);
} else {
state.uploaded(PeerState.PARTSIZE);
remainder = m.len - PeerState.PARTSIZE;
}
}
m.sendMessage(dout);
if (remainder > 0)
state.uploaded(remainder);
m = null;
}
}
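The hunk above changes how upload credit is counted when seeding: a PIECE message no larger than the normal request size is credited in full before the blocking sendMessage() call, while an oversized request is credited one chunk up front and the remainder only after the send completes, so clients asking for big chunks can no longer jump the queue. A minimal standalone sketch of that accounting, assuming a 16 KB PARTSIZE (the usual i2psnark request size; the constant and class names here are illustrative, not the real PeerConnectionOut):

// Sketch only; PARTSIZE value and class name are assumptions.
class UploadAccountingSketch {
    static final int PARTSIZE = 16 * 1024;
    long uploaded;

    void sendPiece(int pieceLen) {
        int remainder = 0;
        if (pieceLen <= PARTSIZE) {
            uploaded += pieceLen;          // normal-sized request: credit it all now
        } else {
            uploaded += PARTSIZE;          // oversized request: credit one chunk now
            remainder = pieceLen - PARTSIZE;
        }
        // ... m.sendMessage(dout) happens here in the real code ...
        if (remainder > 0)
            uploaded += remainder;         // credit the rest only after the send finishes
    }
}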

View File

@ -203,7 +203,7 @@ public class I2PTunnelHTTPClient extends I2PTunnelClientBase implements Runnable
if (size <= 0) {
if (_log.shouldLog(Log.INFO))
_log.info("Proxy list is empty - no outproxy available");
l.log("Proxy list is emtpy - no outproxy available");
l.log("Proxy list is empty - no outproxy available");
return null;
}
int index = I2PAppContext.getGlobalContext().random().nextInt(size);

View File

@ -50,7 +50,7 @@ public class SOCKSUDPUnwrapper implements Source, Sink {
int headerlen = h.getBytes().length;
byte unwrapped[] = new byte[data.length - headerlen];
System.arraycopy(unwrapped, 0, data, headerlen, unwrapped.length);
System.arraycopy(data, headerlen, unwrapped, 0, unwrapped.length);
this.sink.send(dest, unwrapped);
}

View File

@ -39,8 +39,8 @@ public class SOCKSUDPWrapper implements Source, Sink {
byte[] header = h.getBytes();
byte wrapped[] = new byte[header.length + data.length];
System.arraycopy(wrapped, 0, header, 0, header.length);
System.arraycopy(wrapped, header.length, data, 0, data.length);
System.arraycopy(header, 0, wrapped, 0, header.length);
System.arraycopy(data, 0, wrapped, header.length, data.length);
this.sink.send(from, wrapped);
}

View File

@ -298,6 +298,9 @@ public class ConfigNetHandler extends FormHandler {
_context.router().shutdownGracefully(Router.EXIT_GRACEFUL_RESTART);
}
private static final int DEF_BURST_PCT = 10;
private static final int DEF_BURST_TIME = 20;
private void updateRates() {
boolean updated = false;
@ -310,14 +313,27 @@ public class ConfigNetHandler extends FormHandler {
}
}
// Since burst is now hidden in the gui, set burst to +10% for 20 seconds
if ( (_inboundRate != null) && (_inboundRate.length() > 0) &&
!_inboundRate.equals(_context.getProperty(FIFOBandwidthRefiller.PROP_INBOUND_BANDWIDTH, "" + FIFOBandwidthRefiller.DEFAULT_INBOUND_BANDWIDTH))) {
_context.router().setConfigSetting(FIFOBandwidthRefiller.PROP_INBOUND_BANDWIDTH, _inboundRate);
try {
int rate = Integer.parseInt(_inboundRate) * (100 + DEF_BURST_PCT) / 100;
int kb = DEF_BURST_TIME * rate;
_context.router().setConfigSetting(FIFOBandwidthRefiller.PROP_INBOUND_BURST_BANDWIDTH, "" + rate);
_context.router().setConfigSetting(FIFOBandwidthRefiller.PROP_INBOUND_BANDWIDTH_PEAK, "" + kb);
} catch (NumberFormatException nfe) {}
updated = true;
}
if ( (_outboundRate != null) && (_outboundRate.length() > 0) &&
!_outboundRate.equals(_context.getProperty(FIFOBandwidthRefiller.PROP_OUTBOUND_BANDWIDTH, "" + FIFOBandwidthRefiller.DEFAULT_OUTBOUND_BANDWIDTH))) {
_context.router().setConfigSetting(FIFOBandwidthRefiller.PROP_OUTBOUND_BANDWIDTH, _outboundRate);
try {
int rate = Integer.parseInt(_outboundRate) * (100 + DEF_BURST_PCT) / 100;
int kb = DEF_BURST_TIME * rate;
_context.router().setConfigSetting(FIFOBandwidthRefiller.PROP_OUTBOUND_BURST_BANDWIDTH, "" + rate);
_context.router().setConfigSetting(FIFOBandwidthRefiller.PROP_OUTBOUND_BANDWIDTH_PEAK, "" + kb);
} catch (NumberFormatException nfe) {}
updated = true;
}
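Both branches above derive the hidden burst settings from the rate the user typed in: the burst rate is the configured rate plus DEF_BURST_PCT percent, and the peak bucket is DEF_BURST_TIME seconds' worth of that burst rate. A worked example of the arithmetic, assuming a user enters 50 KBps (the numbers are illustrative only):

// Illustrative arithmetic for the burst derivation above.
class BurstMathExample {
    public static void main(String[] args) {
        final int DEF_BURST_PCT = 10, DEF_BURST_TIME = 20;
        int inboundRate = 50;                                       // KBps entered in the form
        int burstRate = inboundRate * (100 + DEF_BURST_PCT) / 100;  // 55 KBps burst bandwidth
        int burstBucket = DEF_BURST_TIME * burstRate;               // 1100 KB peak bucket
        System.out.println(burstRate + " KBps burst, " + burstBucket + " KB peak bucket");
    }
}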

View File

@ -7,7 +7,7 @@ import java.util.concurrent.ConcurrentHashMap;
import net.i2p.I2PAppContext;
public class NavHelper {
private static Map<String, String> _apps = new ConcurrentHashMap();
private static Map<String, String> _apps = new ConcurrentHashMap(4);
/**
* To register a new client application so that it shows up on the router
@ -28,7 +28,9 @@ public class NavHelper {
* Translated string is loaded by PluginStarter
*/
public static String getClientAppLinks(I2PAppContext ctx) {
StringBuilder buf = new StringBuilder(1024);
if (_apps.isEmpty())
return "";
StringBuilder buf = new StringBuilder(256);
for (Iterator<String> iter = _apps.keySet().iterator(); iter.hasNext(); ) {
String name = iter.next();
String path = _apps.get(name);

View File

@ -282,7 +282,11 @@ public class NetDbRenderer {
}
for (Iterator iter = info.getAddresses().iterator(); iter.hasNext(); ) {
RouterAddress addr = (RouterAddress)iter.next();
buf.append("<b>").append(DataHelper.stripHTML(addr.getTransportStyle())).append(":</b> ");
String style = addr.getTransportStyle();
buf.append("<b>").append(DataHelper.stripHTML(style)).append(":</b> ");
int cost = addr.getCost();
if (!((style.equals("SSU") && cost == 5) || (style.equals("NTCP") && cost == 10)))
buf.append('[').append("cost").append('=').append("" + cost).append("] ");
for (Iterator optIter = addr.getOptions().keySet().iterator(); optIter.hasNext(); ) {
String name = (String)optIter.next();
String val = addr.getOptions().getProperty(name);

View File

@ -180,7 +180,7 @@ public class PluginStarter implements Runnable {
* @return true on success
* @throws just about anything, caller would be wise to catch Throwable
*/
static boolean stopPlugin(RouterContext ctx, String appName) throws IOException {
static boolean stopPlugin(RouterContext ctx, String appName) throws Exception {
Log log = ctx.logManager().getLog(PluginStarter.class);
File pluginDir = new File(ctx.getAppDir(), PluginUpdateHandler.PLUGIN_DIR + '/' + appName);
if ((!pluginDir.exists()) || (!pluginDir.isDirectory())) {
@ -228,7 +228,7 @@ public class PluginStarter implements Runnable {
}
/** @return true on success - caller should call stopPlugin() first */
static boolean deletePlugin(RouterContext ctx, String appName) throws IOException {
static boolean deletePlugin(RouterContext ctx, String appName) throws Exception {
Log log = ctx.logManager().getLog(PluginStarter.class);
File pluginDir = new File(ctx.getAppDir(), PluginUpdateHandler.PLUGIN_DIR + '/' + appName);
if ((!pluginDir.exists()) || (!pluginDir.isDirectory())) {
@ -348,8 +348,12 @@ public class PluginStarter implements Runnable {
} catch (IOException ioe) {}
}
/** @param action "start" or "stop" or "uninstall" */
private static void runClientApps(RouterContext ctx, File pluginDir, List<ClientAppConfig> apps, String action) {
/**
* @param action "start" or "stop" or "uninstall"
* @throws just about anything if an app has a delay less than zero, caller would be wise to catch Throwable
* If no apps have a delay less than zero, it shouldn't throw anything
*/
private static void runClientApps(RouterContext ctx, File pluginDir, List<ClientAppConfig> apps, String action) throws Exception {
Log log = ctx.logManager().getLog(PluginStarter.class);
for(ClientAppConfig app : apps) {
if (action.equals("start") && app.disabled)
@ -388,10 +392,18 @@ public class PluginStarter implements Runnable {
}
addToClasspath(cp, app.clientName, log);
}
if (app.delay == 0 || !action.equals("start")) {
if (app.delay < 0 && action.equals("start")) {
// this will throw exceptions
LoadClientAppsJob.runClientInline(app.className, app.clientName, argVal, log);
} else if (app.delay == 0 || !action.equals("start")) {
// quick check, will throw ClassNotFoundException on error
LoadClientAppsJob.testClient(app.className);
// run this guy now
LoadClientAppsJob.runClient(app.className, app.clientName, argVal, log);
} else {
// quick check, will throw ClassNotFoundException on error
LoadClientAppsJob.testClient(app.className);
// wait before firing it up
ctx.jobQueue().addJob(new LoadClientAppsJob.DelayedRunClient(ctx, app.className, app.clientName, argVal, app.delay));
}

View File

@ -205,13 +205,14 @@ public class UpdateHandler {
} else {
_log.log(Log.CRIT, "Update was VERIFIED, will be installed at next restart");
StringBuilder buf = new StringBuilder(64);
buf.append("<b>").append(_("Update downloaded")).append("</b><br>");
buf.append("<b>").append(_("Update downloaded")).append("<br>");
if (System.getProperty("wrapper.version") != null)
buf.append(_("Click Restart to install"));
else
buf.append(_("Click Shutdown and restart to install"));
if (up.newVersion() != null)
buf.append(' ').append(_("Version {0}", up.newVersion()));
buf.append("</b>");
updateStatus(buf.toString());
}
} else {

View File

@ -10,7 +10,8 @@
<h1><%=intl._("I2P Router Logs")%></h1>
<div class="main" id="main">
<div class="joblog"><h3><%=intl._("I2P Version & Running Environment")%></h3><a name="version"> </a>
<i><%=intl._("Please include this information in bug reports")%>:</i>
<p><%=intl._("Please report bugs on <a href=\"http://trac.i2p2.i2p/newticket\">trac.i2p2.i2p</a>.")%>
<p><i><%=intl._("Please include this information in bug reports")%>:</i>
<p>
<b>I2P version:</b> <jsp:getProperty name="helper" property="version" /><br>
<b>Java version:</b> <%=System.getProperty("java.vendor")%> <%=System.getProperty("java.version")%><br>

View File

@ -6,6 +6,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;
import net.i2p.I2PAppContext;
import net.i2p.client.I2PSession;
@ -29,7 +30,7 @@ public class Connection {
private long _sendStreamId;
private long _receiveStreamId;
private long _lastSendTime;
private long _lastSendId;
private AtomicLong _lastSendId;
private boolean _resetReceived;
private boolean _resetSent;
private long _resetSentOn;
@ -49,7 +50,7 @@ public class Connection {
private boolean _isInbound;
private boolean _updatedShareOpts;
/** Packet ID (Long) to PacketLocal for sent but unacked packets */
private final Map _outboundPackets;
private final Map<Long, PacketLocal> _outboundPackets;
private PacketQueue _outboundQueue;
private ConnectionPacketHandler _handler;
private ConnectionOptions _options;
@ -102,7 +103,7 @@ public class Connection {
_options = (opts != null ? opts : new ConnectionOptions());
_outputStream.setWriteTimeout((int)_options.getWriteTimeout());
_inputStream.setReadTimeout((int)_options.getReadTimeout());
_lastSendId = -1;
_lastSendId = new AtomicLong(-1);
_nextSendTime = -1;
_ackedPackets = 0;
_createdOn = _context.clock().now();
@ -137,9 +138,7 @@ public class Connection {
}
public long getNextOutboundPacketNum() {
synchronized (this) {
return ++_lastSendId;
}
return _lastSendId.incrementAndGet();
}
void closeReceived() {
@ -175,7 +174,7 @@ public class Connection {
return false;
started = true;
if ( (_outboundPackets.size() >= _options.getWindowSize()) || (_activeResends > 0) ||
(_lastSendId - _highestAckedThrough > _options.getWindowSize()) ) {
(_lastSendId.get() - _highestAckedThrough > _options.getWindowSize()) ) {
if (timeoutMs > 0) {
if (timeLeft <= 0) {
if (_log.shouldLog(Log.INFO))
@ -211,10 +210,10 @@ public class Connection {
void ackImmediately() {
PacketLocal packet = null;
synchronized (_outboundPackets) {
if (_outboundPackets.size() > 0) {
if (!_outboundPackets.isEmpty()) {
// ordered, so pick the lowest to retransmit
Iterator iter = _outboundPackets.values().iterator();
packet = (PacketLocal)iter.next();
Iterator<PacketLocal> iter = _outboundPackets.values().iterator();
packet = iter.next();
//iter.remove();
}
}
@ -403,10 +402,10 @@ public class Connection {
}
}
List acked = null;
List<PacketLocal> acked = null;
synchronized (_outboundPackets) {
for (Iterator iter = _outboundPackets.keySet().iterator(); iter.hasNext(); ) {
Long id = (Long)iter.next();
for (Iterator<Long> iter = _outboundPackets.keySet().iterator(); iter.hasNext(); ) {
Long id = iter.next();
if (id.longValue() <= ackThrough) {
boolean nacked = false;
if (nacks != null) {
@ -414,7 +413,7 @@ public class Connection {
for (int i = 0; i < nacks.length; i++) {
if (nacks[i] == id.longValue()) {
nacked = true;
PacketLocal nackedPacket = (PacketLocal)_outboundPackets.get(id);
PacketLocal nackedPacket = _outboundPackets.get(id);
nackedPacket.incrementNACKs();
break; // NACKed
}
@ -423,7 +422,7 @@ public class Connection {
if (!nacked) { // aka ACKed
if (acked == null)
acked = new ArrayList(1);
PacketLocal ackedPacket = (PacketLocal)_outboundPackets.get(id);
PacketLocal ackedPacket = _outboundPackets.get(id);
ackedPacket.ackReceived();
acked.add(ackedPacket);
}
@ -433,7 +432,7 @@ public class Connection {
}
if (acked != null) {
for (int i = 0; i < acked.size(); i++) {
PacketLocal p = (PacketLocal)acked.get(i);
PacketLocal p = acked.get(i);
_outboundPackets.remove(new Long(p.getSequenceNum()));
_ackedPackets++;
if (p.getNumSends() > 1) {
@ -443,7 +442,7 @@ public class Connection {
}
}
}
if ( (_outboundPackets.size() <= 0) && (_activeResends != 0) ) {
if ( (_outboundPackets.isEmpty()) && (_activeResends != 0) ) {
if (_log.shouldLog(Log.INFO))
_log.info("All outbound packets acked, clearing " + _activeResends);
_activeResends = 0;
@ -570,8 +569,8 @@ public class Connection {
private void killOutstandingPackets() {
//boolean tagsCancelled = false;
synchronized (_outboundPackets) {
for (Iterator iter = _outboundPackets.values().iterator(); iter.hasNext(); ) {
PacketLocal pl = (PacketLocal)iter.next();
for (Iterator<PacketLocal> iter = _outboundPackets.values().iterator(); iter.hasNext(); ) {
PacketLocal pl = iter.next();
//if ( (pl.getTagsSent() != null) && (pl.getTagsSent().size() > 0) )
// tagsCancelled = true;
pl.cancelled();
@ -652,11 +651,11 @@ public class Connection {
/** What was the last packet Id sent to the peer?
* @return The last sent packet ID
*/
public long getLastSendId() { return _lastSendId; }
public long getLastSendId() { return _lastSendId.get(); }
/** Set the packet Id that was sent to a peer.
* @param id The packet ID
*/
public void setLastSendId(long id) { _lastSendId = id; }
public void setLastSendId(long id) { _lastSendId.set(id); }
/**
* Retrieve the current ConnectionOptions.
@ -783,7 +782,7 @@ public class Connection {
if (_ackSinceCongestion) {
_lastCongestionSeenAt = _options.getWindowSize();
_lastCongestionTime = _context.clock().now();
_lastCongestionHighestUnacked = _lastSendId;
_lastCongestionHighestUnacked = _lastSendId.get();
_ackSinceCongestion = false;
}
}
@ -1022,7 +1021,7 @@ public class Connection {
}
if (getCloseReceivedOn() > 0)
buf.append(" close received ").append(DataHelper.formatDuration(_context.clock().now() - getCloseReceivedOn())).append(" ago");
buf.append(" sent: ").append(1 + _lastSendId);
buf.append(" sent: ").append(1 + _lastSendId.get());
if (_inputStream != null)
buf.append(" rcvd: ").append(1 + _inputStream.getHighestBlockId() - missing);

View File

@ -1,10 +1,10 @@
package net.i2p.client.streaming;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.I2PAppContext;
import net.i2p.I2PException;
@ -32,14 +32,13 @@ public class ConnectionManager {
private ConnectionPacketHandler _conPacketHandler;
private TCBShare _tcbShare;
/** Inbound stream ID (Long) to Connection map */
private Map _connectionByInboundId;
private ConcurrentHashMap<Long, Connection> _connectionByInboundId;
/** Ping ID (Long) to PingRequest */
private final Map _pendingPings;
private final Map<Long, PingRequest> _pendingPings;
private boolean _allowIncoming;
private int _maxConcurrentStreams;
private ConnectionOptions _defaultOptions;
private volatile int _numWaiting;
private final Object _connectionLock;
private long SoTimeout;
public ConnectionManager(I2PAppContext context, I2PSession session, int maxConcurrent, ConnectionOptions defaultOptions) {
@ -48,9 +47,8 @@ public class ConnectionManager {
_maxConcurrentStreams = maxConcurrent;
_defaultOptions = defaultOptions;
_log = _context.logManager().getLog(ConnectionManager.class);
_connectionByInboundId = new HashMap(32);
_pendingPings = new HashMap(4);
_connectionLock = new Object();
_connectionByInboundId = new ConcurrentHashMap(32);
_pendingPings = new ConcurrentHashMap(4);
_messageHandler = new MessageHandler(_context, this);
_packetHandler = new PacketHandler(_context, this);
_connectionHandler = new ConnectionHandler(_context, this);
@ -77,22 +75,17 @@ public class ConnectionManager {
}
Connection getConnectionByInboundId(long id) {
synchronized (_connectionLock) {
return (Connection)_connectionByInboundId.get(new Long(id));
}
return _connectionByInboundId.get(Long.valueOf(id));
}
/**
* not guaranteed to be unique, but in case we receive more than one packet
* on an inbound connection that we havent ack'ed yet...
*/
Connection getConnectionByOutboundId(long id) {
synchronized (_connectionLock) {
for (Iterator iter = _connectionByInboundId.values().iterator(); iter.hasNext(); ) {
Connection con = (Connection)iter.next();
for (Connection con : _connectionByInboundId.values()) {
if (DataHelper.eq(con.getSendStreamId(), id))
return con;
}
}
return null;
}
@ -135,27 +128,26 @@ public class ConnectionManager {
boolean reject = false;
int active = 0;
int total = 0;
synchronized (_connectionLock) {
total = _connectionByInboundId.size();
for (Iterator iter = _connectionByInboundId.values().iterator(); iter.hasNext(); ) {
if ( ((Connection)iter.next()).getIsConnected() )
active++;
}
// just for the stat
//total = _connectionByInboundId.size();
//for (Iterator iter = _connectionByInboundId.values().iterator(); iter.hasNext(); ) {
// if ( ((Connection)iter.next()).getIsConnected() )
// active++;
//}
if (locked_tooManyStreams()) {
reject = true;
} else {
while (true) {
Connection oldCon = (Connection)_connectionByInboundId.put(new Long(receiveId), con);
Connection oldCon = _connectionByInboundId.putIfAbsent(Long.valueOf(receiveId), con);
if (oldCon == null) {
break;
} else {
_connectionByInboundId.put(new Long(receiveId), oldCon);
// receiveId already taken, try another
receiveId = _context.random().nextLong(Packet.MAX_STREAM_ID-1)+1;
}
}
}
}
_context.statManager().addRateData("stream.receiveActive", active, total);
@ -179,9 +171,7 @@ public class ConnectionManager {
try {
con.getPacketHandler().receivePacket(synPacket, con);
} catch (I2PException ie) {
synchronized (_connectionLock) {
_connectionByInboundId.remove(new Long(receiveId));
}
_connectionByInboundId.remove(Long.valueOf(receiveId));
return null;
}
@ -215,8 +205,7 @@ public class ConnectionManager {
_numWaiting--;
return null;
}
boolean reject = false;
synchronized (_connectionLock) {
if (locked_tooManyStreams()) {
// allow a full buffer of pending/waiting streams
if (_numWaiting > _maxConcurrentStreams) {
@ -227,27 +216,30 @@ public class ConnectionManager {
_numWaiting--;
return null;
}
// no remaining streams, lets wait a bit
try { _connectionLock.wait(remaining); } catch (InterruptedException ie) {}
// got rid of the lock, so just sleep (fixme?)
// try { _connectionLock.wait(remaining); } catch (InterruptedException ie) {}
try { Thread.sleep(remaining/4); } catch (InterruptedException ie) {}
} else {
con = new Connection(_context, this, _schedulerChooser, _outboundQueue, _conPacketHandler, opts);
con.setRemotePeer(peer);
while (_connectionByInboundId.containsKey(new Long(receiveId))) {
while (_connectionByInboundId.containsKey(Long.valueOf(receiveId))) {
receiveId = _context.random().nextLong(Packet.MAX_STREAM_ID-1)+1;
}
_connectionByInboundId.put(new Long(receiveId), con);
_connectionByInboundId.put(Long.valueOf(receiveId), con);
break; // stop looping as a psuedo-wait
}
}
}
// ok we're in...
con.setReceiveStreamId(receiveId);
con.eventOccurred();
_log.debug("Connect() conDelay = " + opts.getConnectDelay());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Connect() conDelay = " + opts.getConnectDelay());
if (opts.getConnectDelay() <= 0) {
con.waitForConnect();
}
@ -258,12 +250,15 @@ public class ConnectionManager {
return con;
}
/**
* Doesn't need to be locked any more
* @return too many
*/
private boolean locked_tooManyStreams() {
if (_maxConcurrentStreams <= 0) return false;
if (_connectionByInboundId.size() < _maxConcurrentStreams) return false;
int active = 0;
for (Iterator iter = _connectionByInboundId.values().iterator(); iter.hasNext(); ) {
Connection con = (Connection)iter.next();
for (Connection con : _connectionByInboundId.values()) {
if (con.getIsConnected())
active++;
}
@ -293,13 +288,10 @@ public class ConnectionManager {
*
*/
public void disconnectAllHard() {
synchronized (_connectionLock) {
for (Iterator iter = _connectionByInboundId.values().iterator(); iter.hasNext(); ) {
Connection con = (Connection)iter.next();
con.disconnect(false, false);
}
_connectionByInboundId.clear();
_connectionLock.notifyAll();
for (Iterator<Connection> iter = _connectionByInboundId.values().iterator(); iter.hasNext(); ) {
Connection con = iter.next();
con.disconnect(false, false);
iter.remove();
}
_tcbShare.stop();
}
@ -310,17 +302,15 @@ public class ConnectionManager {
* @param con Connection to drop.
*/
public void removeConnection(Connection con) {
boolean removed = false;
synchronized (_connectionLock) {
Object o = _connectionByInboundId.remove(new Long(con.getReceiveStreamId()));
removed = (o == con);
Object o = _connectionByInboundId.remove(Long.valueOf(con.getReceiveStreamId()));
boolean removed = (o == con);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Connection removed? " + removed + " remaining: "
+ _connectionByInboundId.size() + ": " + con);
if (!removed && _log.shouldLog(Log.DEBUG))
_log.debug("Failed to remove " + con +"\n" + _connectionByInboundId.values());
_connectionLock.notifyAll();
}
if (removed) {
_context.statManager().addRateData("stream.con.lifetimeMessagesSent", 1+con.getLastSendId(), con.getLifetime());
MessageInputStream stream = con.getInputStream();
@ -344,10 +334,8 @@ public class ConnectionManager {
/** return a set of Connection objects
* @return set of Connection objects
*/
public Set listConnections() {
synchronized (_connectionLock) {
public Set<Connection> listConnections() {
return new HashSet(_connectionByInboundId.values());
}
}
/** blocking */
@ -368,7 +356,7 @@ public class ConnectionManager {
}
public boolean ping(Destination peer, long timeoutMs, boolean blocking, PingNotifier notifier) {
Long id = new Long(_context.random().nextLong(Packet.MAX_STREAM_ID-1)+1);
Long id = Long.valueOf(_context.random().nextLong(Packet.MAX_STREAM_ID-1)+1);
PacketLocal packet = new PacketLocal(_context, peer);
packet.setSendStreamId(id.longValue());
packet.setFlag(Packet.FLAG_ECHO);
@ -381,9 +369,7 @@ public class ConnectionManager {
PingRequest req = new PingRequest(peer, packet, notifier);
synchronized (_pendingPings) {
_pendingPings.put(id, req);
}
_pendingPings.put(id, req);
_outboundQueue.enqueue(packet);
packet.releasePayload();
@ -393,10 +379,7 @@ public class ConnectionManager {
if (!req.pongReceived())
try { req.wait(timeoutMs); } catch (InterruptedException ie) {}
}
synchronized (_pendingPings) {
_pendingPings.remove(id);
}
_pendingPings.remove(id);
} else {
SimpleTimer.getInstance().addEvent(new PingFailed(id, notifier), timeoutMs);
}
@ -418,13 +401,8 @@ public class ConnectionManager {
}
public void timeReached() {
boolean removed = false;
synchronized (_pendingPings) {
Object o = _pendingPings.remove(_id);
if (o != null)
removed = true;
}
if (removed) {
PingRequest pr = _pendingPings.remove(_id);
if (pr != null) {
if (_notifier != null)
_notifier.pingComplete(false);
if (_log.shouldLog(Log.INFO))
@ -433,7 +411,7 @@ public class ConnectionManager {
}
}
private class PingRequest {
private static class PingRequest {
private boolean _ponged;
private Destination _peer;
private PacketLocal _packet;
@ -445,7 +423,8 @@ public class ConnectionManager {
_notifier = notifier;
}
public void pong() {
_log.debug("Ping successful");
// static, no log
//_log.debug("Ping successful");
//_context.sessionKeyManager().tagsDelivered(_peer.getPublicKey(), _packet.getKeyUsed(), _packet.getTagsSent());
synchronized (ConnectionManager.PingRequest.this) {
_ponged = true;
@ -458,10 +437,7 @@ public class ConnectionManager {
}
void receivePong(long pingId) {
PingRequest req = null;
synchronized (_pendingPings) {
req = (PingRequest)_pendingPings.remove(new Long(pingId));
}
PingRequest req = _pendingPings.remove(Long.valueOf(pingId));
if (req != null)
req.pong();
}
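Several of the locking changes above work because _connectionByInboundId is now a ConcurrentHashMap: receiveConnection() claims a stream ID with putIfAbsent() and retries with a fresh random ID on collision, so the old _connectionLock is no longer needed. A small self-contained sketch of that claim loop (the ID range and types here are stand-ins, not the real Packet.MAX_STREAM_ID logic):

import java.util.Random;
import java.util.concurrent.ConcurrentHashMap;

class StreamIdClaimSketch {
    private final ConcurrentHashMap<Long, Object> byInboundId = new ConcurrentHashMap<Long, Object>();
    private final Random random = new Random();

    long claim(Object con) {
        long id = 1 + random.nextInt(1000);                    // stand-in for nextLong(MAX_STREAM_ID-1)+1
        while (byInboundId.putIfAbsent(Long.valueOf(id), con) != null) {
            id = 1 + random.nextInt(1000);                     // ID already taken: pick another
        }
        return id;                                             // this thread now owns the slot
    }
}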

View File

@ -1,13 +1,14 @@
package net.i2p.client.streaming;
import java.util.ArrayList;
import java.util.List;
import java.util.Iterator;
import java.util.Set;
import net.i2p.I2PAppContext;
import net.i2p.client.I2PSession;
import net.i2p.client.I2PSessionException;
import net.i2p.client.I2PSessionListener;
import net.i2p.util.Log;
import net.i2p.util.ConcurrentHashSet;
/**
* Receive raw information from the I2PSession and turn it into
@ -18,12 +19,12 @@ public class MessageHandler implements I2PSessionListener {
private ConnectionManager _manager;
private I2PAppContext _context;
private Log _log;
private final List _listeners;
private final Set<I2PSocketManager.DisconnectListener> _listeners;
public MessageHandler(I2PAppContext ctx, ConnectionManager mgr) {
_manager = mgr;
_context = ctx;
_listeners = new ArrayList(1);
_listeners = new ConcurrentHashSet(1);
_log = ctx.logManager().getLog(MessageHandler.class);
_context.statManager().createRateStat("stream.packetReceiveFailure", "When do we fail to decrypt or otherwise receive a packet sent to us?", "Stream", new long[] { 60*60*1000, 24*60*60*1000 });
}
@ -77,14 +78,10 @@ public class MessageHandler implements I2PSessionListener {
_log.warn("I2PSession disconnected");
_manager.disconnectAllHard();
List listeners = null;
synchronized (_listeners) {
listeners = new ArrayList(_listeners);
_listeners.clear();
}
for (int i = 0; i < listeners.size(); i++) {
I2PSocketManager.DisconnectListener lsnr = (I2PSocketManager.DisconnectListener)listeners.get(i);
for (Iterator<I2PSocketManager.DisconnectListener> iter = _listeners.iterator(); iter.hasNext(); ) {
I2PSocketManager.DisconnectListener lsnr = iter.next();
lsnr.sessionDisconnected();
iter.remove();
}
}
@ -104,13 +101,9 @@ public class MessageHandler implements I2PSessionListener {
}
public void addDisconnectListener(I2PSocketManager.DisconnectListener lsnr) {
synchronized (_listeners) {
_listeners.add(lsnr);
}
}
public void removeDisconnectListener(I2PSocketManager.DisconnectListener lsnr) {
synchronized (_listeners) {
_listeners.remove(lsnr);
}
}
}

View File

@ -222,8 +222,8 @@ public class MessageOutputStream extends OutputStream {
// We've seen the queue blow up before, maybe it was this before the rewrite...
// So perhaps it IS wise to be "overly worried" ...
forceReschedule(_passiveFlushDelay);
if (_log.shouldLog(Log.INFO))
_log.info("Enqueueing the flusher for " + _passiveFlushDelay + "ms out");
if (_log.shouldLog(Log.DEBUG))
_log.debug("Enqueueing the flusher for " + _passiveFlushDelay + "ms out");
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("NOT enqueing the flusher");

View File

@ -55,7 +55,8 @@ public class SchedulerChooser {
}
public void eventOccurred(Connection con) {
_log.log(Log.ERROR, "Yell at jrandom: Event occurred on " + con, new Exception("source"));
if (_log.shouldLog(Log.WARN))
_log.warn("Yell at jrandom: Event occurred on " + con, new Exception("source"));
}
public boolean accept(Connection con) { return true; }
};

View File

@ -1,10 +1,10 @@
package net.i2p.util;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.I2PAppContext;
import net.i2p.data.ByteArray;
@ -36,12 +36,12 @@ public final class ByteCache {
}
private Log _log;
/** list of available and available entries */
private final List _available;
private Queue<ByteArray> _available;
private int _maxCached;
private int _entrySize;
private long _lastOverflow;
/** do we actually want to cache? */
/** do we actually want to cache? Warning - setting to false may NPE, this should be fixed or removed */
private static final boolean _cache = true;
/** how often do we cleanup the cache */
@ -51,7 +51,7 @@ public final class ByteCache {
private ByteCache(int maxCachedEntries, int entrySize) {
if (_cache)
_available = new ArrayList(maxCachedEntries);
_available = new LinkedBlockingQueue(maxCachedEntries);
_maxCached = maxCachedEntries;
_entrySize = entrySize;
_lastOverflow = -1;
@ -62,6 +62,12 @@ public final class ByteCache {
private void resize(int maxCachedEntries) {
if (_maxCached >= maxCachedEntries) return;
_maxCached = maxCachedEntries;
// make a bigger one, move the cached items over
Queue newLBQ = new LinkedBlockingQueue(maxCachedEntries);
ByteArray ba;
while ((ba = _available.poll()) != null)
newLBQ.offer(ba);
_available = newLBQ;
}
/**
@ -70,10 +76,9 @@ public final class ByteCache {
*/
public final ByteArray acquire() {
if (_cache) {
synchronized (_available) {
if (_available.size() > 0)
return (ByteArray)_available.remove(0);
}
ByteArray rv = _available.poll();
if (rv != null)
return rv;
}
_lastOverflow = System.currentTimeMillis();
byte data[] = new byte[_entrySize];
@ -100,10 +105,7 @@ public final class ByteCache {
if (shouldZero)
Arrays.fill(entry.getData(), (byte)0x0);
synchronized (_available) {
if (_available.size() < _maxCached)
_available.add(entry);
}
_available.offer(entry);
}
}
@ -112,13 +114,11 @@ public final class ByteCache {
if (System.currentTimeMillis() - _lastOverflow > EXPIRE_PERIOD) {
// we haven't exceeded the cache size in a few minutes, so lets
// shrink the cache
synchronized (_available) {
int toRemove = _available.size() / 2;
for (int i = 0; i < toRemove; i++)
_available.remove(0);
_available.poll();
if ( (toRemove > 0) && (_log.shouldLog(Log.DEBUG)) )
_log.debug("Removing " + toRemove + " cached entries of size " + _entrySize);
}
}
}
}
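The ByteCache rewrite above swaps the synchronized ArrayList for a bounded LinkedBlockingQueue: acquire() becomes a lock-free poll() that falls back to allocating, and release() becomes a bounded offer() that simply drops the buffer once the cache is full. A minimal sketch of the pattern with the entry type reduced to byte[] (the sizes are illustrative):

import java.util.concurrent.LinkedBlockingQueue;

class ByteCacheSketch {
    private final LinkedBlockingQueue<byte[]> available = new LinkedBlockingQueue<byte[]>(64);
    private static final int ENTRY_SIZE = 4096;

    byte[] acquire() {
        byte[] buf = available.poll();      // non-blocking; null when the cache is empty
        return (buf != null) ? buf : new byte[ENTRY_SIZE];
    }

    void release(byte[] buf) {
        available.offer(buf);               // bounded queue: returns false (drops) when full
    }
}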

View File

@ -12,10 +12,8 @@
<!-- use pack200 compression, saves about 33%
see http://java.sun.com/j2se/1.5.0/docs/guide/deployment/deployment-guide/pack200.html
However it makes the unpacked jars much larger...
For further testing...
<pack200 />
-->
<pack200 />
<!-- adding this element will make the installer attempt to launch itself with administrator permissions,
but see http://www.nabble.com/Classpath-security-issues-on-Vista-td22456230.html

View File

@ -11,7 +11,16 @@ import java.io.FileInputStream;
import java.io.Writer;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
import net.i2p.data.Base64;
import net.i2p.data.DataHelper;
@ -56,9 +65,9 @@ public class Blocklist {
private int _blocklistSize;
private final Object _lock = new Object();
private Entry _wrapSave;
private final Set<Hash> _inProcess = new HashSet(0);
private Map<Hash, String> _peerBlocklist = new HashMap(0);
private final Set<Integer> _singleIPBlocklist = new ConcurrentHashSet(0);
private final Set<Hash> _inProcess = new HashSet(4);
private Map<Hash, String> _peerBlocklist = new HashMap(4);
private final Set<Integer> _singleIPBlocklist = new ConcurrentHashSet(4);
public Blocklist(RouterContext context) {
_context = context;
@ -109,8 +118,8 @@ public class Blocklist {
return;
}
}
for (Iterator iter = _peerBlocklist.keySet().iterator(); iter.hasNext(); ) {
Hash peer = (Hash) iter.next();
for (Iterator<Hash> iter = _peerBlocklist.keySet().iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
String reason;
String comment = (String) _peerBlocklist.get(peer);
if (comment != null)
@ -125,8 +134,8 @@ public class Blocklist {
return;
FloodfillNetworkDatabaseFacade fndf = (FloodfillNetworkDatabaseFacade) _context.netDb();
int count = 0;
for (Iterator iter = fndf.getKnownRouterData().iterator(); iter.hasNext(); ) {
RouterInfo ri = (RouterInfo) iter.next();
for (Iterator<RouterInfo> iter = fndf.getKnownRouterData().iterator(); iter.hasNext(); ) {
RouterInfo ri = iter.next();
Hash peer = ri.getIdentity().getHash();
if (isBlocklisted(peer))
count++;
@ -458,15 +467,15 @@ public class Blocklist {
* this tries to not return duplicates
* but I suppose it could.
*/
public List getAddresses(Hash peer) {
List rv = new ArrayList(1);
public List<byte[]> getAddresses(Hash peer) {
List<byte[]> rv = new ArrayList(1);
RouterInfo pinfo = _context.netDb().lookupRouterInfoLocally(peer);
if (pinfo == null) return rv;
Set paddr = pinfo.getAddresses();
Set<RouterAddress> paddr = pinfo.getAddresses();
if (paddr == null || paddr.size() == 0)
return rv;
String oldphost = null;
List pladdr = new ArrayList(paddr);
List<RouterAddress> pladdr = new ArrayList(paddr);
// for each peer address
for (int j = 0; j < paddr.size(); j++) {
RouterAddress pa = (RouterAddress) pladdr.get(j);
@ -495,9 +504,9 @@ public class Blocklist {
* If so, and it isn't shitlisted, shitlist it forever...
*/
public boolean isBlocklisted(Hash peer) {
List ips = getAddresses(peer);
for (Iterator iter = ips.iterator(); iter.hasNext(); ) {
byte ip[] = (byte[]) iter.next();
List<byte[]> ips = getAddresses(peer);
for (Iterator<byte[]> iter = ips.iterator(); iter.hasNext(); ) {
byte ip[] = iter.next();
if (isBlocklisted(ip)) {
if (! _context.shitlist().isShitlisted(peer))
// nice knowing you...
@ -715,8 +724,8 @@ public class Blocklist {
// look through the file for each address to find which one was the cause
List ips = getAddresses(peer);
for (Iterator iter = ips.iterator(); iter.hasNext(); ) {
byte ip[] = (byte[]) iter.next();
for (Iterator<byte[]> iter = ips.iterator(); iter.hasNext(); ) {
byte ip[] = iter.next();
int ipint = toInt(ip);
FileInputStream in = null;
try {
@ -762,12 +771,12 @@ public class Blocklist {
public void renderStatusHTML(Writer out) throws IOException {
// move to the jsp
//out.write("<h2>Banned IPs</h2>");
Set singles = new TreeSet();
Set<Integer> singles = new TreeSet();
singles.addAll(_singleIPBlocklist);
if (singles.size() > 0) {
out.write("<table><tr><td><b>Transient IPs</b></td></tr>");
for (Iterator iter = singles.iterator(); iter.hasNext(); ) {
int ip = ((Integer) iter.next()).intValue();
for (Iterator<Integer> iter = singles.iterator(); iter.hasNext(); ) {
int ip = iter.next().intValue();
out.write("<tr><td align=right>"); out.write(toStr(ip)); out.write("</td></tr>\n");
}
out.write("</table>");

View File

@ -11,7 +11,6 @@ package net.i2p.router;
import java.io.IOException;
import java.io.Writer;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@ -30,7 +29,7 @@ public abstract class CommSystemFacade implements Service {
public void renderStatusHTML(Writer out) throws IOException { renderStatusHTML(out, null, 0); }
/** Create the set of RouterAddress structures based on the router's config */
public Set createAddresses() { return new HashSet(); }
public Set<RouterAddress> createAddresses() { return Collections.EMPTY_SET; }
public int countActivePeers() { return 0; }
public int countActiveSendPeers() { return 0; }

View File

@ -24,8 +24,8 @@ class DummyPeerManagerFacade implements PeerManagerFacade {
public void startup() {}
public void restart() {}
public void renderStatusHTML(Writer out) { }
public List selectPeers(PeerSelectionCriteria criteria) { return null; }
public List getPeersByCapability(char capability) { return null; }
public List<Hash> selectPeers(PeerSelectionCriteria criteria) { return null; }
public List<Hash> getPeersByCapability(char capability) { return null; }
public void setCapabilities(Hash peer, String caps) {}
public void removeCapabilities(Hash peer) {}
public Hash selectRandomByCapability(char capability) { return null; }

View File

@ -240,7 +240,7 @@ public class InNetMessagePool implements Service {
}
public int handleReplies(I2NPMessage messageBody) {
List origMessages = _context.messageRegistry().getOriginalMessages(messageBody);
List<OutNetMessage> origMessages = _context.messageRegistry().getOriginalMessages(messageBody);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Original messages for inbound message: " + origMessages.size());
if (origMessages.size() > 1) {
@ -250,7 +250,7 @@ public class InNetMessagePool implements Service {
}
for (int i = 0; i < origMessages.size(); i++) {
OutNetMessage omsg = (OutNetMessage)origMessages.get(i);
OutNetMessage omsg = origMessages.get(i);
ReplyJob job = omsg.getOnReplyJob();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Original message [" + i + "] " + omsg.getReplySelector()

View File

@ -19,7 +19,7 @@ public interface Job {
*/
public String getName();
/** unique id */
public int getJobId();
public long getJobId();
/**
* Timing criteria for the task
*/

View File

@ -15,8 +15,8 @@ import net.i2p.util.Log;
public abstract class JobImpl implements Job {
private RouterContext _context;
private JobTiming _timing;
private static int _idSrc = 0;
private int _id;
private static long _idSrc = 0;
private long _id;
private Exception _addedBy;
private long _madeReadyOn;
@ -28,7 +28,7 @@ public abstract class JobImpl implements Job {
_madeReadyOn = 0;
}
public int getJobId() { return _id; }
public long getJobId() { return _id; }
public JobTiming getTiming() { return _timing; }
public final RouterContext getContext() { return _context; }

View File

@ -12,10 +12,14 @@ import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.data.DataHelper;
import net.i2p.router.networkdb.HandleDatabaseLookupMessageJob;
@ -33,15 +37,15 @@ public class JobQueue {
private RouterContext _context;
/** Integer (runnerId) to JobQueueRunner for created runners */
private final HashMap _queueRunners;
private final Map<Integer, JobQueueRunner> _queueRunners;
/** a counter to identify a job runner */
private volatile static int _runnerId = 0;
/** list of jobs that are ready to run ASAP */
private ArrayList _readyJobs;
private BlockingQueue<Job> _readyJobs;
/** list of jobs that are scheduled for running in the future */
private ArrayList _timedJobs;
private List<Job> _timedJobs;
/** job name to JobStat for that job */
private final SortedMap _jobStats;
private final Map<String, JobStats> _jobStats;
/** how many job queue runners can go concurrently */
private int _maxRunners = 1;
private QueuePumper _pumper;
@ -52,9 +56,12 @@ public class JobQueue {
private final Object _jobLock;
/** how many when we go parallel */
private static final int RUNNERS = 4;
/** default max # job queue runners operating */
private final static int DEFAULT_MAX_RUNNERS = 1;
/** router.config parameter to override the max runners */
/** router.config parameter to override the max runners @deprecated unimplemented */
private final static String PROP_MAX_RUNNERS = "router.maxJobRunners";
/** how frequently should we check and update the max runners */
@ -63,33 +70,39 @@ public class JobQueue {
/** if a job is this lagged, spit out a warning, but keep going */
private long _lagWarning = DEFAULT_LAG_WARNING;
private final static long DEFAULT_LAG_WARNING = 5*1000;
/** @deprecated unimplemented */
private final static String PROP_LAG_WARNING = "router.jobLagWarning";
/** if a job is this lagged, the router is hosed, so shut it down */
/** if a job is this lagged, the router is hosed, so spit out a warning (dont shut it down) */
private long _lagFatal = DEFAULT_LAG_FATAL;
private final static long DEFAULT_LAG_FATAL = 30*1000;
/** @deprecated unimplemented */
private final static String PROP_LAG_FATAL = "router.jobLagFatal";
/** if a job takes this long to run, spit out a warning, but keep going */
private long _runWarning = DEFAULT_RUN_WARNING;
private final static long DEFAULT_RUN_WARNING = 5*1000;
/** @deprecated unimplemented */
private final static String PROP_RUN_WARNING = "router.jobRunWarning";
/** if a job takes this long to run, the router is hosed, so shut it down */
/** if a job takes this long to run, the router is hosed, so spit out a warning (dont shut it down) */
private long _runFatal = DEFAULT_RUN_FATAL;
private final static long DEFAULT_RUN_FATAL = 30*1000;
/** @deprecated unimplemented */
private final static String PROP_RUN_FATAL = "router.jobRunFatal";
/** don't enforce fatal limits until the router has been up for this long */
private long _warmupTime = DEFAULT_WARMUP_TIME;
private final static long DEFAULT_WARMUP_TIME = 10*60*1000;
private final static String PROP_WARMUM_TIME = "router.jobWarmupTime";
/** @deprecated unimplemented */
private final static String PROP_WARMUP_TIME = "router.jobWarmupTime";
/** max ready and waiting jobs before we start dropping 'em */
private int _maxWaitingJobs = DEFAULT_MAX_WAITING_JOBS;
private final static int DEFAULT_MAX_WAITING_JOBS = 100;
/** @deprecated unimplemented */
private final static String PROP_MAX_WAITING_JOBS = "router.maxWaitingJobs";
/**
* queue runners wait on this whenever they're not doing anything, and
* this gets notified *once* whenever there are ready jobs
@ -109,16 +122,14 @@ public class JobQueue {
new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_alive = true;
_readyJobs = new ArrayList(16);
_readyJobs = new LinkedBlockingQueue();
_timedJobs = new ArrayList(64);
_jobLock = new Object();
_queueRunners = new HashMap();
_jobStats = Collections.synchronizedSortedMap(new TreeMap());
_queueRunners = new ConcurrentHashMap(RUNNERS);
_jobStats = new ConcurrentHashMap();
_allowParallelOperation = false;
_pumper = new QueuePumper();
I2PThread pumperThread = new I2PThread(_pumper);
pumperThread.setDaemon(true);
pumperThread.setName("QueuePumper");
I2PThread pumperThread = new I2PThread(_pumper, "Job Queue Pumper", true);
//pumperThread.setPriority(I2PThread.NORM_PRIORITY+1);
pumperThread.start();
}
@ -128,7 +139,7 @@ public class JobQueue {
*
*/
public void addJob(Job job) {
if (job == null) return;
if (job == null || !_alive) return;
if (job instanceof JobImpl)
((JobImpl)job).addedToQueue();
@ -136,6 +147,7 @@ public class JobQueue {
long numReady = 0;
boolean alreadyExists = false;
boolean dropped = false;
// getNext() is now outside the jobLock, is that ok?
synchronized (_jobLock) {
if (_readyJobs.contains(job))
alreadyExists = true;
@ -155,7 +167,7 @@ public class JobQueue {
job.getTiming().setStartAfter(_context.clock().now());
if (job instanceof JobImpl)
((JobImpl)job).madeReady();
_readyJobs.add(job);
_readyJobs.offer(job);
} else {
_timedJobs.add(job);
}
@ -167,12 +179,10 @@ public class JobQueue {
_context.statManager().addRateData("jobQueue.readyJobs", numReady, 0);
if (dropped) {
_context.statManager().addRateData("jobQueue.droppedJobs", 1, 1);
if (_log.shouldLog(Log.WARN))
_log.warn("Dropping job due to overload! # ready jobs: "
if (_log.shouldLog(Log.ERROR))
_log.error("Dropping job due to overload! # ready jobs: "
+ numReady + ": job = " + job);
}
return;
}
public void removeJob(Job job) {
@ -189,17 +199,15 @@ public class JobQueue {
}
public int getReadyCount() {
synchronized (_jobLock) {
return _readyJobs.size();
}
}
public long getMaxLag() {
synchronized (_jobLock) {
if (_readyJobs.size() <= 0) return 0;
Job j = _readyJobs.peek();
if (j == null) return 0;
// first job is the one that has been waiting the longest
long startAfter = ((Job)_readyJobs.get(0)).getTiming().getStartAfter();
long startAfter = j.getTiming().getStartAfter();
return _context.clock().now() - startAfter;
}
}
/**
@ -228,9 +236,10 @@ public class JobQueue {
public void allowParallelOperation() {
_allowParallelOperation = true;
runQueue(4);
runQueue(RUNNERS);
}
/** @deprecated do you really want to do this? */
public void restart() {
synchronized (_jobLock) {
_timedJobs.clear();
@ -241,14 +250,21 @@ public class JobQueue {
void shutdown() {
_alive = false;
synchronized (_jobLock) {
_jobLock.notifyAll();
}
_timedJobs.clear();
_readyJobs.clear();
// The JobQueueRunners are NOT daemons,
// so they must be stopped.
Job poison = new PoisonJob();
for (int i = 0; i < _queueRunners.size(); i++)
_readyJobs.offer(poison);
/********
if (_log.shouldLog(Log.WARN)) {
StringBuilder buf = new StringBuilder(1024);
buf.append("current jobs: \n");
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
JobQueueRunner runner = (JobQueueRunner)iter.next();
JobQueueRunner runner = iter.next();
Job j = runner.getCurrentJob();
buf.append("Runner ").append(runner.getRunnerId()).append(": ");
@ -279,7 +295,9 @@ public class JobQueue {
buf.append(_timedJobs.get(i).toString()).append("\n\t");
_log.log(Log.WARN, buf.toString());
}
********/
}
boolean isAlive() { return _alive; }
/**
@ -287,9 +305,8 @@ public class JobQueue {
*/
public long getLastJobBegin() {
long when = -1;
// not synchronized, so might b0rk if the runners are changed
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
long cur = ((JobQueueRunner)iter.next()).getLastBegin();
for (JobQueueRunner runner : _queueRunners.values()) {
long cur = runner.getLastBegin();
if (cur > when)
cur = when;
}
@ -300,9 +317,8 @@ public class JobQueue {
*/
public long getLastJobEnd() {
long when = -1;
// not synchronized, so might b0rk if the runners are changed
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
long cur = ((JobQueueRunner)iter.next()).getLastEnd();
for (JobQueueRunner runner : _queueRunners.values()) {
long cur = runner.getLastEnd();
if (cur > when)
cur = when;
}
@ -315,9 +331,7 @@ public class JobQueue {
public Job getLastJob() {
Job j = null;
long when = -1;
// not synchronized, so might b0rk if the runners are changed
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
JobQueueRunner cur = (JobQueueRunner)iter.next();
for (JobQueueRunner cur : _queueRunners.values()) {
if (cur.getLastBegin() > when) {
j = cur.getCurrentJob();
when = cur.getLastBegin();
@ -333,13 +347,10 @@ public class JobQueue {
Job getNext() {
while (_alive) {
try {
synchronized (_jobLock) {
if (_readyJobs.size() > 0) {
return (Job)_readyJobs.remove(0);
} else {
_jobLock.wait();
}
}
Job j = _readyJobs.take();
if (j.getJobId() == POISON_ID)
break;
return j;
} catch (InterruptedException ie) {}
}
if (_log.shouldLog(Log.WARN))
@ -355,8 +366,7 @@ public class JobQueue {
* the current job.
*
*/
public void runQueue(int numThreads) {
synchronized (_queueRunners) {
public synchronized void runQueue(int numThreads) {
// we're still starting up [serially] and we've got at least one runner,
// so dont do anything
if ( (_queueRunners.size() > 0) && (!_allowParallelOperation) ) return;
@ -377,8 +387,7 @@ public class JobQueue {
t.start();
}
} else if (_queueRunners.size() == numThreads) {
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
JobQueueRunner runner = (JobQueueRunner)iter.next();
for (JobQueueRunner runner : _queueRunners.values()) {
runner.startRunning();
}
} else { // numThreads < # runners, so shrink
@ -387,7 +396,6 @@ public class JobQueue {
// runner.stopRunning();
//}
}
}
}
void removeRunner(int id) { _queueRunners.remove(Integer.valueOf(id)); }
@ -407,11 +415,11 @@ public class JobQueue {
while (_alive) {
long now = _context.clock().now();
long timeToWait = -1;
ArrayList toAdd = null;
List<Job> toAdd = null;
try {
synchronized (_jobLock) {
for (int i = 0; i < _timedJobs.size(); i++) {
Job j = (Job)_timedJobs.get(i);
Job j = _timedJobs.get(i);
// find jobs due to start before now
long timeLeft = j.getTiming().getStartAfter() - now;
if (timeLeft <= 0) {
@ -437,7 +445,7 @@ public class JobQueue {
// extra alloc. (no, i'm not just being insane - i'm updating this based
// on some profiling data ;)
for (int i = 0; i < toAdd.size(); i++)
_readyJobs.add(toAdd.get(i));
_readyJobs.offer(toAdd.get(i));
_jobLock.notifyAll();
} else {
if (timeToWait < 0)
@ -476,17 +484,15 @@ public class JobQueue {
private void updateJobTimings(long delta) {
synchronized (_jobLock) {
for (int i = 0; i < _timedJobs.size(); i++) {
Job j = (Job)_timedJobs.get(i);
Job j = _timedJobs.get(i);
j.getTiming().offsetChanged(delta);
}
for (int i = 0; i < _readyJobs.size(); i++) {
Job j = (Job)_readyJobs.get(i);
for (Job j : _readyJobs) {
j.getTiming().offsetChanged(delta);
}
}
synchronized (_runnerLock) {
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); ) {
JobQueueRunner runner = (JobQueueRunner)iter.next();
for (JobQueueRunner runner : _queueRunners.values()) {
Job job = runner.getCurrentJob();
if (job != null)
job.getTiming().offsetChanged(delta);
@ -509,14 +515,14 @@ public class JobQueue {
if (lag < 0) lag = 0;
if (duration < 0) duration = 0;
JobStats stats = null;
if (!_jobStats.containsKey(key)) {
_jobStats.put(key, new JobStats(key));
JobStats stats = _jobStats.get(key);
if (stats == null) {
stats = new JobStats(key);
_jobStats.put(key, stats);
// yes, if two runners finish the same job at the same time, this could
// create an extra object. but, who cares, its pushed out of the map
// immediately anyway.
}
stats = (JobStats)_jobStats.get(key);
stats.jobRan(duration, lag);
String dieMsg = null;
@ -555,26 +561,39 @@ public class JobQueue {
}
/** job ID counter changed from int to long so it won't wrap negative */
private static final int POISON_ID = -99999;
private static class PoisonJob implements Job {
public String getName() { return null; }
public long getJobId() { return POISON_ID; }
public JobTiming getTiming() { return null; }
public void runJob() {}
public Exception getAddedBy() { return null; }
public void dropped() {}
}
////
// the remainder are utility methods for dumping status info
////
public void renderStatusHTML(Writer out) throws IOException {
ArrayList readyJobs = null;
ArrayList timedJobs = null;
ArrayList activeJobs = new ArrayList(1);
ArrayList justFinishedJobs = new ArrayList(4);
List<Job> readyJobs = null;
List<Job> timedJobs = null;
List<Job> activeJobs = new ArrayList(RUNNERS);
List<Job> justFinishedJobs = new ArrayList(RUNNERS);
//out.write("<!-- jobQueue rendering -->\n");
out.flush();
int states[] = null;
//int states[] = null;
int numRunners = 0;
synchronized (_queueRunners) {
states = new int[_queueRunners.size()];
{
//states = new int[_queueRunners.size()];
int i = 0;
for (Iterator iter = _queueRunners.values().iterator(); iter.hasNext(); i++) {
JobQueueRunner runner = (JobQueueRunner)iter.next();
states[i] = runner.getState();
for (Iterator<JobQueueRunner> iter = _queueRunners.values().iterator(); iter.hasNext(); i++) {
JobQueueRunner runner = iter.next();
//states[i] = runner.getState();
Job job = runner.getCurrentJob();
if (job != null) {
activeJobs.add(job);
@ -621,21 +640,21 @@ public class JobQueue {
buf.append("<hr><b>Active jobs: ").append(activeJobs.size()).append("</b><ol>\n");
for (int i = 0; i < activeJobs.size(); i++) {
Job j = (Job)activeJobs.get(i);
Job j = activeJobs.get(i);
buf.append("<li>[started ").append(DataHelper.formatDuration(now-j.getTiming().getStartAfter())).append(" ago]: ");
buf.append(j.toString()).append("</li>\n");
}
buf.append("</ol>\n");
buf.append("<hr><b>Just finished jobs: ").append(justFinishedJobs.size()).append("</b><ol>\n");
for (int i = 0; i < justFinishedJobs.size(); i++) {
Job j = (Job)justFinishedJobs.get(i);
Job j = justFinishedJobs.get(i);
buf.append("<li>[finished ").append(DataHelper.formatDuration(now-j.getTiming().getActualEnd())).append(" ago]: ");
buf.append(j.toString()).append("</li>\n");
}
buf.append("</ol>\n");
buf.append("<hr><b>Ready/waiting jobs: ").append(readyJobs.size()).append("</b><ol>\n");
for (int i = 0; i < readyJobs.size(); i++) {
Job j = (Job)readyJobs.get(i);
Job j = readyJobs.get(i);
buf.append("<li>[waiting ");
buf.append(DataHelper.formatDuration(now-j.getTiming().getStartAfter()));
buf.append("]: ");
@ -645,13 +664,13 @@ public class JobQueue {
out.flush();
buf.append("<hr><b>Scheduled jobs: ").append(timedJobs.size()).append("</b><ol>\n");
TreeMap ordered = new TreeMap();
TreeMap<Long, Job> ordered = new TreeMap();
for (int i = 0; i < timedJobs.size(); i++) {
Job j = (Job)timedJobs.get(i);
Job j = timedJobs.get(i);
ordered.put(new Long(j.getTiming().getStartAfter()), j);
}
for (Iterator iter = ordered.values().iterator(); iter.hasNext(); ) {
Job j = (Job)iter.next();
for (Iterator<Job> iter = ordered.values().iterator(); iter.hasNext(); ) {
Job j = iter.next();
long time = j.getTiming().getStartAfter() - now;
buf.append("<li>").append(j.getName()).append(" in ");
buf.append(DataHelper.formatDuration(time)).append("</li>\n");
@ -685,13 +704,10 @@ public class JobQueue {
long maxPendingTime = -1;
long minPendingTime = -1;
TreeMap tstats = null;
synchronized (_jobStats) {
tstats = new TreeMap(_jobStats);
}
TreeMap<String, JobStats> tstats = new TreeMap(_jobStats);
for (Iterator iter = tstats.values().iterator(); iter.hasNext(); ) {
JobStats stats = (JobStats)iter.next();
for (Iterator<JobStats> iter = tstats.values().iterator(); iter.hasNext(); ) {
JobStats stats = iter.next();
buf.append("<tr>");
buf.append("<td><b>").append(stats.getName()).append("</b></td>");
buf.append("<td align=\"right\">").append(stats.getRuns()).append("</td>");

View File

@ -23,12 +23,12 @@ class JobQueueRunner implements Runnable {
_currentJob = null;
_lastJob = null;
_log = _context.logManager().getLog(JobQueueRunner.class);
_context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunSlow", "How long jobs that take over a second take", "JobQueue", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRun", "How long jobs take", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunSlow", "How long jobs that take over a second take", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobLag", "How long jobs have to wait before running", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sit on the job queue?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
_state = 1;
_context.statManager().createRateStat("jobQueue.jobWait", "How long does a job sit on the job queue?", "JobQueue", new long[] { 60*60*1000l, 24*60*60*1000l });
//_context.statManager().createRateStat("jobQueue.jobRunnerInactive", "How long are runners inactive?", "JobQueue", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
//_state = 1;
}
final int getState() { return _state; }
@ -41,16 +41,16 @@ class JobQueueRunner implements Runnable {
public long getLastBegin() { return _lastBegin; }
public long getLastEnd() { return _lastEnd; }
public void run() {
_state = 2;
//_state = 2;
long lastActive = _context.clock().now();
long jobNum = 0;
while ( (_keepRunning) && (_context.jobQueue().isAlive()) ) {
_state = 3;
//_state = 3;
try {
Job job = _context.jobQueue().getNext();
_state = 4;
//_state = 4;
if (job == null) {
_state = 5;
//_state = 5;
if (_context.router().isAlive())
if (_log.shouldLog(Log.ERROR))
_log.error("getNext returned null - dead?");
@ -60,14 +60,14 @@ class JobQueueRunner implements Runnable {
long enqueuedTime = 0;
if (job instanceof JobImpl) {
_state = 6;
//_state = 6;
long when = ((JobImpl)job).getMadeReadyOn();
if (when <= 0) {
_state = 7;
//_state = 7;
_log.error("Job was not made ready?! " + job,
new Exception("Not made ready?!"));
} else {
_state = 8;
//_state = 8;
enqueuedTime = now - when;
}
}
@ -75,27 +75,27 @@ class JobQueueRunner implements Runnable {
long betweenJobs = now - lastActive;
_currentJob = job;
_lastJob = null;
_state = 9;
//_state = 9;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Runner " + _id + " running job " + job.getJobId() + ": " + job.getName());
long origStartAfter = job.getTiming().getStartAfter();
long doStart = _context.clock().now();
_state = 10;
//_state = 10;
job.getTiming().start();
runCurrentJob();
job.getTiming().end();
_state = 11;
//_state = 11;
long duration = job.getTiming().getActualEnd() - job.getTiming().getActualStart();
long beforeUpdate = _context.clock().now();
_state = 12;
//_state = 12;
_context.jobQueue().updateStats(job, doStart, origStartAfter, duration);
_state = 13;
//_state = 13;
long diff = _context.clock().now() - beforeUpdate;
long lag = doStart - origStartAfter;
if (lag < 0) lag = 0;
_context.statManager().addRateData("jobQueue.jobRunnerInactive", betweenJobs, betweenJobs);
//_context.statManager().addRateData("jobQueue.jobRunnerInactive", betweenJobs, betweenJobs);
_context.statManager().addRateData("jobQueue.jobRun", duration, duration);
_context.statManager().addRateData("jobQueue.jobLag", lag, 0);
_context.statManager().addRateData("jobQueue.jobWait", enqueuedTime, enqueuedTime);
@ -107,7 +107,7 @@ class JobQueueRunner implements Runnable {
+ ") on job " + _currentJob);
}
_state = 14;
//_state = 14;
if (diff > 100) {
if (_log.shouldLog(Log.WARN))
@ -121,7 +121,7 @@ class JobQueueRunner implements Runnable {
_currentJob = null;
_lastEnd = lastActive;
jobNum++;
_state = 15;
//_state = 15;
//if ( (jobNum % 10) == 0)
// System.gc();
@ -130,22 +130,22 @@ class JobQueueRunner implements Runnable {
_log.log(Log.CRIT, "WTF, error running?", t);
}
}
_state = 16;
//_state = 16;
if (_context.router().isAlive())
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Queue runner " + _id + " exiting");
_context.jobQueue().removeRunner(_id);
_state = 17;
//_state = 17;
}
private void runCurrentJob() {
try {
_state = 18;
//_state = 18;
_lastBegin = _context.clock().now();
_currentJob.runJob();
_state = 19;
//_state = 19;
} catch (OutOfMemoryError oom) {
_state = 20;
//_state = 20;
try {
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Router ran out of memory, shutting down", oom);
@ -157,7 +157,7 @@ class JobQueueRunner implements Runnable {
try { Thread.sleep(1000); } catch (InterruptedException ie) {}
System.exit(-1);
} catch (Throwable t) {
_state = 21;
//_state = 21;
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Error processing job [" + _currentJob.getName()
+ "] on thread " + _id + ": " + t.getMessage(), t);

View File

@ -12,10 +12,10 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.DataFormatException;
import net.i2p.data.DataStructure;
@ -41,7 +41,7 @@ public class KeyManager {
private PublicKey _publicKey;
private SigningPrivateKey _signingPrivateKey;
private SigningPublicKey _signingPublicKey;
private final Map _leaseSetKeys; // Destination --> LeaseSetKeys
private final Map<Hash, LeaseSetKeys> _leaseSetKeys; // Destination --> LeaseSetKeys
private SynchronizeKeysJob _synchronizeJob;
public final static String PROP_KEYDIR = "router.keyBackupDir";
@ -63,7 +63,7 @@ public class KeyManager {
setPublicKey(null);
setSigningPrivateKey(null);
setSigningPublicKey(null);
_leaseSetKeys = new HashMap();
_leaseSetKeys = new ConcurrentHashMap();
}
public void startup() {
@ -102,9 +102,7 @@ public class KeyManager {
public void registerKeys(Destination dest, SigningPrivateKey leaseRevocationPrivateKey, PrivateKey endpointDecryptionKey) {
_log.info("Registering keys for destination " + dest.calculateHash().toBase64());
LeaseSetKeys keys = new LeaseSetKeys(dest, leaseRevocationPrivateKey, endpointDecryptionKey);
synchronized (_leaseSetKeys) {
_leaseSetKeys.put(dest.calculateHash(), keys);
}
_leaseSetKeys.put(dest.calculateHash(), keys);
}
private void queueWrite() {
@ -118,27 +116,19 @@ public class KeyManager {
public LeaseSetKeys unregisterKeys(Destination dest) {
if (_log.shouldLog(Log.INFO))
_log.info("Unregistering keys for destination " + dest.calculateHash().toBase64());
LeaseSetKeys rv = null;
synchronized (_leaseSetKeys) {
rv = (LeaseSetKeys)_leaseSetKeys.remove(dest.calculateHash());
}
return rv;
return _leaseSetKeys.remove(dest.calculateHash());
}
public LeaseSetKeys getKeys(Destination dest) {
return getKeys(dest.calculateHash());
}
public LeaseSetKeys getKeys(Hash dest) {
synchronized (_leaseSetKeys) {
return (LeaseSetKeys)_leaseSetKeys.get(dest);
}
return _leaseSetKeys.get(dest);
}
public Set getAllKeys() {
public Set<LeaseSetKeys> getAllKeys() {
HashSet keys = new HashSet();
synchronized (_leaseSetKeys) {
keys.addAll(_leaseSetKeys.values());
}
keys.addAll(_leaseSetKeys.values());
return keys;
}

View File

@ -46,18 +46,18 @@ public class OutNetMessage {
private ReplyJob _onReply;
private Job _onFailedReply;
private MessageSelector _replySelector;
private Set _failedTransports;
private Set<String> _failedTransports;
private long _sendBegin;
private long _transmitBegin;
private Exception _createdBy;
private long _created;
/** for debugging, contains a mapping of event name to Long (e.g. "begin sending", "handleOutbound", etc) */
private HashMap _timestamps;
private HashMap<String, Long> _timestamps;
/**
* contains a list of timestamp event names in the order they were fired
* (some JVMs have less than 10ms resolution, so the Long above doesn't guarantee order)
*/
private List _timestampOrder;
private List<String> _timestampOrder;
private int _queueSize;
private long _prepareBegin;
private long _prepareEnd;
@ -108,11 +108,11 @@ public class OutNetMessage {
}
return now - _created;
}
public Map getTimestamps() {
public Map<String, Long> getTimestamps() {
if (_log.shouldLog(Log.INFO)) {
synchronized (this) {
locked_initTimestamps();
return (Map)_timestamps.clone();
return (Map<String, Long>)_timestamps.clone();
}
}
return Collections.EMPTY_MAP;
@ -121,7 +121,7 @@ public class OutNetMessage {
if (_log.shouldLog(Log.INFO)) {
synchronized (this) {
locked_initTimestamps();
return (Long)_timestamps.get(eventName);
return _timestamps.get(eventName);
}
}
return ZERO;
@ -339,8 +339,8 @@ public class OutNetMessage {
synchronized (this) {
long lastWhen = -1;
for (int i = 0; i < _timestampOrder.size(); i++) {
String name = (String)_timestampOrder.get(i);
Long when = (Long)_timestamps.get(name);
String name = _timestampOrder.get(i);
Long when = _timestamps.get(name);
buf.append("\t[");
long diff = when.longValue() - lastWhen;
if ( (lastWhen > 0) && (diff > 500) )

View File

@ -25,8 +25,8 @@ public interface PeerManagerFacade extends Service {
*
* @return List of Hash objects of the RouterIdentity for matching peers
*/
public List selectPeers(PeerSelectionCriteria criteria);
public List getPeersByCapability(char capability);
public List<Hash> selectPeers(PeerSelectionCriteria criteria);
public List<Hash> getPeersByCapability(char capability);
public void setCapabilities(Hash peer, String caps);
public void removeCapabilities(Hash peer);
public Hash selectRandomByCapability(char capability);

View File

@ -44,27 +44,27 @@ class RouterThrottleImpl implements RouterThrottle {
_log = context.logManager().getLog(RouterThrottleImpl.class);
setTunnelStatus();
_context.statManager().createRateStat("router.throttleNetworkCause", "How lagged the jobQueue was when an I2NP was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleNetDbCause", "How lagged the jobQueue was when a networkDb request was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelCause", "How lagged the jobQueue was when a tunnel request was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
//_context.statManager().createRateStat("router.throttleNetDbCause", "How lagged the jobQueue was when a networkDb request was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
//_context.statManager().createRateStat("router.throttleTunnelCause", "How lagged the jobQueue was when a tunnel request was throttled", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("tunnel.bytesAllocatedAtAccept", "How many bytes had been 'allocated' for participating tunnels when we accepted a request?", "Tunnels", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelProcessingTime1m", "How long it takes to process a message (1 minute average) when we throttle a tunnel?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelProcessingTime10m", "How long it takes to process a message (10 minute average) when we throttle a tunnel?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelMaxExceeded", "How many tunnels we are participating in when we refuse one due to excees?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelProbTooFast", "How many tunnels beyond the previous 1h average are we participating in when we throttle?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelProbTestSlow", "How slow are our tunnel tests when our average exceeds the old average and we throttle?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
//_context.statManager().createRateStat("router.throttleTunnelProbTestSlow", "How slow are our tunnel tests when our average exceeds the old average and we throttle?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelBandwidthExceeded", "How much bandwidth is allocated when we refuse due to bandwidth allocation?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelBytesAllowed", "How many bytes are allowed to be sent when we get a tunnel request (period is how many are currently allocated)?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelBytesUsed", "Used Bps at request (period = max KBps)?", "Throttle", new long[] { 10*60*1000, 60*60*1000, 24*60*60*1000 });
_context.statManager().createRateStat("router.throttleTunnelFailCount1m", "How many messages failed to be sent in the last 2 minutes when we throttle based on a spike in failures (period = 10 minute average failure count)?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000});
_context.statManager().createRateStat("router.throttleTunnelQueueOverload", "How many pending tunnel request messages have we received when we reject them due to overload (period = time to process each)?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000});
//_context.statManager().createRateStat("router.throttleTunnelQueueOverload", "How many pending tunnel request messages have we received when we reject them due to overload (period = time to process each)?", "Throttle", new long[] { 60*1000, 10*60*1000, 60*60*1000});
}
public boolean acceptNetworkMessage() {
//if (true) return true;
long lag = _context.jobQueue().getMaxLag();
if ( (lag > JOB_LAG_LIMIT) && (_context.router().getUptime() > 60*1000) ) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Throttling network reader, as the job lag is " + lag);
if (_log.shouldLog(Log.WARN))
_log.warn("Throttling network reader, as the job lag is " + lag);
_context.statManager().addRateData("router.throttleNetworkCause", lag, lag);
return false;
} else {

View File

@ -11,12 +11,12 @@ package net.i2p.router.peermanager;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
@ -26,6 +26,7 @@ import net.i2p.router.networkdb.kademlia.FloodfillNetworkDatabaseFacade;
import net.i2p.util.Log;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
import net.i2p.util.ConcurrentHashSet;
/**
* Manage the current state of the statistics
@ -46,8 +47,8 @@ class PeerManager {
private RouterContext _context;
private ProfileOrganizer _organizer;
private ProfilePersistenceHelper _persistenceHelper;
private List _peersByCapability[];
private final Map _capabilitiesByPeer;
private Set<Hash> _peersByCapability[];
private final Map<Hash, String> _capabilitiesByPeer;
public PeerManager(RouterContext context) {
_context = context;
@ -55,10 +56,10 @@ class PeerManager {
_persistenceHelper = new ProfilePersistenceHelper(context);
_organizer = context.profileOrganizer();
_organizer.setUs(context.routerHash());
_capabilitiesByPeer = new HashMap(128);
_peersByCapability = new List[26];
_capabilitiesByPeer = new ConcurrentHashMap(128);
_peersByCapability = new Set[26];
for (int i = 0; i < _peersByCapability.length; i++)
_peersByCapability[i] = new ArrayList(64);
_peersByCapability[i] = new ConcurrentHashSet();
loadProfiles();
////_context.jobQueue().addJob(new EvaluateProfilesJob(_context));
SimpleScheduler.getInstance().addPeriodicEvent(new Reorg(), 0, 45*1000);
@ -77,14 +78,16 @@ class PeerManager {
void storeProfiles() {
Set peers = selectPeers();
for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
for (Iterator<Hash> iter = peers.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
storeProfile(peer);
}
}
Set selectPeers() {
return _organizer.selectAllPeers();
}
void storeProfile(Hash peer) {
if (peer == null) return;
PeerProfile prof = _organizer.getProfile(peer);
@ -92,10 +95,11 @@ class PeerManager {
if (true)
_persistenceHelper.writeProfile(prof);
}
void loadProfiles() {
Set profiles = _persistenceHelper.readProfiles();
for (Iterator iter = profiles.iterator(); iter.hasNext();) {
PeerProfile prof = (PeerProfile)iter.next();
Set<PeerProfile> profiles = _persistenceHelper.readProfiles();
for (Iterator<PeerProfile> iter = profiles.iterator(); iter.hasNext();) {
PeerProfile prof = iter.next();
if (prof != null) {
_organizer.addProfile(prof);
if (_log.shouldLog(Log.DEBUG))
@ -107,10 +111,11 @@ class PeerManager {
/**
* Find some peers that meet the criteria and for which we have the netDb info locally
*
* Only used by PeerTestJob (PURPOSE_TEST)
*/
List selectPeers(PeerSelectionCriteria criteria) {
Set peers = new HashSet(criteria.getMinimumRequired());
Set exclude = new HashSet(1);
List<Hash> selectPeers(PeerSelectionCriteria criteria) {
Set<Hash> peers = new HashSet(criteria.getMinimumRequired());
Set<Hash> exclude = new HashSet(1);
exclude.add(_context.routerHash());
switch (criteria.getPurpose()) {
case PeerSelectionCriteria.PURPOSE_TEST:
@ -143,10 +148,10 @@ class PeerManager {
default:
break;
}
if (peers.size() <= 0) {
if (peers.isEmpty()) {
if (_log.shouldLog(Log.WARN))
_log.warn("We ran out of peers when looking for reachable ones after finding "
+ peers.size() + " with "
+ "0 with "
+ _organizer.countWellIntegratedPeers() + "/"
+ _organizer.countHighCapacityPeers() + "/"
+ _organizer.countFastPeers() + " integrated/high capacity/fast peers");
@ -160,18 +165,18 @@ class PeerManager {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Setting capabilities for " + peer.toBase64() + " to " + caps);
if (caps != null) caps = caps.toLowerCase();
synchronized (_capabilitiesByPeer) {
String oldCaps = null;
if (caps != null)
oldCaps = (String)_capabilitiesByPeer.put(peer, caps);
oldCaps = _capabilitiesByPeer.put(peer, caps);
else
oldCaps = (String)_capabilitiesByPeer.remove(peer);
oldCaps = _capabilitiesByPeer.remove(peer);
if (oldCaps != null) {
for (int i = 0; i < oldCaps.length(); i++) {
char c = oldCaps.charAt(i);
if ( (caps == null) || (caps.indexOf(c) < 0) ) {
List peers = locked_getPeers(c);
Set<Hash> peers = locked_getPeers(c);
if (peers != null)
peers.remove(peer);
}
@ -182,15 +187,15 @@ class PeerManager {
char c = caps.charAt(i);
if ( (oldCaps != null) && (oldCaps.indexOf(c) >= 0) )
continue;
List peers = locked_getPeers(c);
if ( (peers != null) && (!peers.contains(peer)) )
Set<Hash> peers = locked_getPeers(c);
if (peers != null)
peers.add(peer);
}
}
}
}
private List locked_getPeers(char c) {
/** locking no longer req'd */
private Set<Hash> locked_getPeers(char c) {
c = Character.toLowerCase(c);
int i = c - 'a';
if ( (i < 0) || (i >= _peersByCapability.length) ) {
@ -204,18 +209,19 @@ class PeerManager {
public void removeCapabilities(Hash peer) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Removing capabilities from " + peer.toBase64());
synchronized (_capabilitiesByPeer) {
String oldCaps = (String)_capabilitiesByPeer.remove(peer);
if (oldCaps != null) {
for (int i = 0; i < oldCaps.length(); i++) {
char c = oldCaps.charAt(i);
List peers = locked_getPeers(c);
Set<Hash> peers = locked_getPeers(c);
if (peers != null)
peers.remove(peer);
}
}
}
}
/*******
public Hash selectRandomByCapability(char capability) {
int index = _context.random().nextInt(Integer.MAX_VALUE);
synchronized (_capabilitiesByPeer) {
@ -227,20 +233,29 @@ class PeerManager {
}
return null;
}
public List getPeersByCapability(char capability) {
if (false) {
synchronized (_capabilitiesByPeer) {
List peers = locked_getPeers(capability);
if (peers != null)
return new ArrayList(peers);
}
********/
/**
* The only user of this is TunnelPeerSelector for unreachables?
*/
public List<Hash> getPeersByCapability(char capability) {
if (true) {
Set<Hash> peers = locked_getPeers(capability);
if (peers != null)
return new ArrayList(peers);
return null;
} else {
// Wow this looks really slow...
// What is the point of keeping all the data structures above
// if we are going to go through the whole netdb anyway?
// Not sure why jrandom switched to do it this way,
// the checkin comments aren't clear...
// Since the locking is gone, switch back to the above.
FloodfillNetworkDatabaseFacade f = (FloodfillNetworkDatabaseFacade)_context.netDb();
List routerInfos = f.getKnownRouterData();
List rv = new ArrayList();
for (Iterator iter = routerInfos.iterator(); iter.hasNext(); ) {
RouterInfo ri = (RouterInfo)iter.next();
List<RouterInfo> routerInfos = f.getKnownRouterData();
List<Hash> rv = new ArrayList();
for (Iterator<RouterInfo> iter = routerInfos.iterator(); iter.hasNext(); ) {
RouterInfo ri = iter.next();
String caps = ri.getCapabilities();
if (caps.indexOf(capability) >= 0)
rv.add(ri.getIdentity().calculateHash());

View File

@ -57,7 +57,7 @@ public class PeerManagerFacadeImpl implements PeerManagerFacade {
_manager.loadProfiles();
}
public List selectPeers(PeerSelectionCriteria criteria) {
public List<Hash> selectPeers(PeerSelectionCriteria criteria) {
return _manager.selectPeers(criteria);
}
@ -69,11 +69,15 @@ public class PeerManagerFacadeImpl implements PeerManagerFacade {
if (_manager == null) return;
_manager.removeCapabilities(peer);
}
/** @deprecated unused */
public Hash selectRandomByCapability(char capability) {
if (_manager == null) return null;
return _manager.selectRandomByCapability(capability);
//if (_manager == null) return null;
//return _manager.selectRandomByCapability(capability);
return null;
}
public List getPeersByCapability(char capability) {
public List<Hash> getPeersByCapability(char capability) {
if (_manager == null) return new ArrayList(0);
return _manager.getPeersByCapability(capability);
}

View File

@ -217,7 +217,7 @@ public class ProfileOrganizer {
return activePeers;
}
private boolean isX(Map m, Hash peer) {
private boolean isX(Map<Hash, PeerProfile> m, Hash peer) {
getReadLock();
try {
return m.containsKey(peer);
@ -272,10 +272,10 @@ public class ProfileOrganizer {
* @param matches set to store the return value in
*
*/
public void selectFastPeers(int howMany, Set exclude, Set matches) {
public void selectFastPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectFastPeers(howMany, exclude, matches, 0);
}
public void selectFastPeers(int howMany, Set exclude, Set matches, int mask) {
public void selectFastPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
getReadLock();
try {
locked_selectPeers(_fastPeers, howMany, exclude, matches, mask);
@ -295,10 +295,10 @@ public class ProfileOrganizer {
* Return a set of Hashes for peers that have a high capacity
*
*/
public void selectHighCapacityPeers(int howMany, Set exclude, Set matches) {
public void selectHighCapacityPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectHighCapacityPeers(howMany, exclude, matches, 0);
}
public void selectHighCapacityPeers(int howMany, Set exclude, Set matches, int mask) {
public void selectHighCapacityPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
getReadLock();
try {
// we only use selectHighCapacityPeers when we are selecting for PURPOSE_TEST
@ -326,10 +326,10 @@ public class ProfileOrganizer {
* Return a set of Hashes for peers that are well integrated into the network.
*
*/
public void selectWellIntegratedPeers(int howMany, Set exclude, Set matches) {
public void selectWellIntegratedPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectWellIntegratedPeers(howMany, exclude, matches, 0);
}
public void selectWellIntegratedPeers(int howMany, Set exclude, Set matches, int mask) {
public void selectWellIntegratedPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
getReadLock();
try {
locked_selectPeers(_wellIntegratedPeers, howMany, exclude, matches, mask);
@ -350,13 +350,13 @@ public class ProfileOrganizer {
* we are already talking with
*
*/
public void selectNotFailingPeers(int howMany, Set exclude, Set matches) {
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
selectNotFailingPeers(howMany, exclude, matches, false, 0);
}
public void selectNotFailingPeers(int howMany, Set exclude, Set matches, int mask) {
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
selectNotFailingPeers(howMany, exclude, matches, false, mask);
}
public void selectNotFailingPeers(int howMany, Set exclude, Set matches, boolean onlyNotFailing) {
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, boolean onlyNotFailing) {
selectNotFailingPeers(howMany, exclude, matches, onlyNotFailing, 0);
}
/**
@ -368,7 +368,7 @@ public class ProfileOrganizer {
* @param matches set to store the matches in
* @param onlyNotFailing if true, don't include any high capacity peers
*/
public void selectNotFailingPeers(int howMany, Set exclude, Set matches, boolean onlyNotFailing, int mask) {
public void selectNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, boolean onlyNotFailing, int mask) {
if (matches.size() < howMany)
selectAllNotFailingPeers(howMany, exclude, matches, onlyNotFailing, mask);
return;
@ -388,7 +388,7 @@ public class ProfileOrganizer {
* @param exclude non-null
* No mask parameter, to be fixed
*/
public void selectActiveNotFailingPeers(int howMany, Set exclude, Set matches) {
public void selectActiveNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
if (matches.size() < howMany) {
getReadLock();
try {
@ -412,7 +412,7 @@ public class ProfileOrganizer {
*
* This DOES cascade further to non-connected peers.
*/
private void selectActiveNotFailingPeers2(int howMany, Set exclude, Set matches, int mask) {
private void selectActiveNotFailingPeers2(int howMany, Set<Hash> exclude, Set<Hash> matches, int mask) {
if (matches.size() < howMany) {
Map<Hash, PeerProfile> activePeers = new HashMap();
getReadLock();
@ -439,14 +439,14 @@ public class ProfileOrganizer {
* Return a set of Hashes for peers that are not failing.
*
*/
public void selectAllNotFailingPeers(int howMany, Set exclude, Set matches, boolean onlyNotFailing) {
public void selectAllNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, boolean onlyNotFailing) {
selectAllNotFailingPeers(howMany, exclude, matches, onlyNotFailing, 0);
}
/**
* @param mask ignored, should call locked_selectPeers, to be fixed
*
*/
private void selectAllNotFailingPeers(int howMany, Set exclude, Set matches, boolean onlyNotFailing, int mask) {
private void selectAllNotFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches, boolean onlyNotFailing, int mask) {
if (matches.size() < howMany) {
int orig = matches.size();
int needed = howMany - orig;
@ -495,7 +495,7 @@ public class ProfileOrganizer {
* I'm not quite sure why you'd want this... (other than for failover from the better results)
*
*/
public void selectFailingPeers(int howMany, Set exclude, Set matches) {
public void selectFailingPeers(int howMany, Set<Hash> exclude, Set<Hash> matches) {
getReadLock();
try {
locked_selectPeers(_failingPeers, howMany, exclude, matches);
@ -564,12 +564,12 @@ public class ProfileOrganizer {
* recent == last 20s
*
*/
public List selectPeersRecentlyRejecting() {
public List<Hash> selectPeersRecentlyRejecting() {
getReadLock();
try {
long cutoff = _context.clock().now() - (20*1000);
int count = _notFailingPeers.size();
List l = new ArrayList(count / 128);
List<Hash> l = new ArrayList(count / 128);
for (Iterator<PeerProfile> iter = _notFailingPeers.values().iterator(); iter.hasNext(); ) {
PeerProfile prof = iter.next();
if (prof.getTunnelHistory().getLastRejectedBandwidth() > cutoff)
@ -583,10 +583,10 @@ public class ProfileOrganizer {
* Find the hashes for all peers we are actively profiling
*
*/
public Set selectAllPeers() {
public Set<Hash> selectAllPeers() {
getReadLock();
try {
Set allPeers = new HashSet(_failingPeers.size() + _notFailingPeers.size() + _highCapacityPeers.size() + _fastPeers.size());
Set<Hash> allPeers = new HashSet(_failingPeers.size() + _notFailingPeers.size() + _highCapacityPeers.size() + _fastPeers.size());
allPeers.addAll(_failingPeers.keySet());
allPeers.addAll(_notFailingPeers.keySet());
allPeers.addAll(_highCapacityPeers.keySet());
@ -853,10 +853,10 @@ public class ProfileOrganizer {
* high capacity group to define the integration threshold.
*
*/
private void locked_calculateThresholds(Set allPeers) {
private void locked_calculateThresholds(Set<PeerProfile> allPeers) {
double totalCapacity = 0;
double totalIntegration = 0;
Set reordered = new TreeSet(_comp);
Set<PeerProfile> reordered = new TreeSet(_comp);
for (Iterator<PeerProfile> iter = allPeers.iterator(); iter.hasNext(); ) {
PeerProfile profile = iter.next();
@ -895,7 +895,7 @@ public class ProfileOrganizer {
* (highest first) for active nonfailing peers whose
* capacity is greater than the growth factor
*/
private void locked_calculateCapacityThreshold(double totalCapacity, Set reordered) {
private void locked_calculateCapacityThreshold(double totalCapacity, Set<PeerProfile> reordered) {
int numNotFailing = reordered.size();
double meanCapacity = avg(totalCapacity, numNotFailing);
@ -964,7 +964,7 @@ public class ProfileOrganizer {
* @param reordered ordered set of PeerProfile objects, ordered by capacity
* (highest first) for active nonfailing peers
*/
private void locked_calculateSpeedThreshold(Set reordered) {
private void locked_calculateSpeedThreshold(Set<PeerProfile> reordered) {
if (true) {
locked_calculateSpeedThresholdMean(reordered);
return;
@ -996,7 +996,7 @@ public class ProfileOrganizer {
*****/
}
private void locked_calculateSpeedThresholdMean(Set reordered) {
private void locked_calculateSpeedThresholdMean(Set<PeerProfile> reordered) {
double total = 0;
int count = 0;
for (Iterator<PeerProfile> iter = reordered.iterator(); iter.hasNext(); ) {
@ -1040,10 +1040,10 @@ public class ProfileOrganizer {
* matches set until it has howMany elements in it.
*
*/
private void locked_selectPeers(Map peers, int howMany, Set toExclude, Set matches) {
private void locked_selectPeers(Map<Hash, PeerProfile> peers, int howMany, Set<Hash> toExclude, Set<Hash> matches) {
locked_selectPeers(peers, howMany, toExclude, matches, 0);
}
private void locked_selectPeers(Map peers, int howMany, Set toExclude, Set matches, int mask) {
private void locked_selectPeers(Map<Hash, PeerProfile> peers, int howMany, Set<Hash> toExclude, Set<Hash> matches, int mask) {
List all = new ArrayList(peers.keySet());
if (toExclude != null)
all.removeAll(toExclude);
@ -1051,7 +1051,7 @@ public class ProfileOrganizer {
all.removeAll(matches);
all.remove(_us);
Collections.shuffle(all, _random);
Set IPSet = new HashSet(8);
Set<Integer> IPSet = new HashSet(8);
for (int i = 0; (matches.size() < howMany) && (i < all.size()); i++) {
Hash peer = (Hash)all.get(i);
boolean ok = isSelectable(peer);
@ -1073,8 +1073,8 @@ public class ProfileOrganizer {
* @param mask is 1-4 (number of bytes to match)
* @param IPSet all IPs so far, modified by this routine
*/
private boolean notRestricted(Hash peer, Set IPSet, int mask) {
Set peerIPs = maskedIPSet(peer, mask);
private boolean notRestricted(Hash peer, Set<Integer> IPSet, int mask) {
Set<Integer> peerIPs = maskedIPSet(peer, mask);
if (containsAny(IPSet, peerIPs))
return false;
IPSet.addAll(peerIPs);
@ -1087,8 +1087,8 @@ public class ProfileOrganizer {
*
* @return an opaque set of masked IPs for this peer
*/
private Set maskedIPSet(Hash peer, int mask) {
Set rv = new HashSet(2);
private Set<Integer> maskedIPSet(Hash peer, int mask) {
Set<Integer> rv = new HashSet(2);
byte[] commIP = _context.commSystem().getIP(peer);
if (commIP != null)
rv.add(maskedIP(commIP, mask));
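The maskedIP() helper called above is not shown in this changeset; as a hedged sketch (assuming it simply packs the first `mask` bytes of the address into the opaque Integer held by the Set<Integer>), it could look like:

    // Sketch only: the real helper in ProfileOrganizer may differ.
    // Packs the first `mask` (1-4) bytes of the IP into an Integer for prefix comparison.
    private static Integer maskedIP(byte[] ip, int mask) {
        int rv = 0;
        for (int i = 0; i < mask && i < ip.length; i++)
            rv = (rv << 8) | (ip[i] & 0xff);
        return Integer.valueOf(rv);
    }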

View File

@ -18,6 +18,49 @@ import net.i2p.router.RouterContext;
* so they can be used both by LoadClientAppsJob and by the configuration
* page in the router console.
*
* <pre>
*
* clients.config format:
*
* Lines are of the form clientApp.x.prop=val, where x is the app number.
* App numbers MUST start with 0 and be consecutive.
*
* Properties are as follows:
* main: Full class name. Required. The main() method in this
* class will be run.
* name: Name to be displayed on console.
* args: Arguments to the main class, separated by spaces or tabs.
* Arguments containing spaces or tabs may be quoted with ' or "
* delay: Seconds before starting, default 120
* onBoot: {true|false}, default false, forces a delay of 0,
* overrides delay setting
* startOnLoad: {true|false} Is the client to be run at all?
* Default true
*
* The following additional properties are used only by plugins:
* stopargs: Arguments to stop the client.
* uninstallargs: Arguments to uninstall the client.
* classpath: Additional classpath elements for the client,
* separated by commas.
*
* The following substitutions are made in the args, stopargs,
* uninstallargs, and classpath lines, for plugins only:
* $I2P: The base I2P install directory
* $CONFIG: The user's configuration directory (e.g. ~/.i2p)
* $PLUGIN: This plugin's directory (e.g. ~/.i2p/plugins/foo)
*
* All properties except "main" are optional.
* Lines starting with "#" are comments.
*
* If the delay is less than zero, the client is run immediately,
* in the same thread, so that exceptions may be propagated to the console.
* In this case, the client should either throw an exception, return quickly,
* or spawn its own thread.
* If the delay is greater than or equal to zero, it will be run
* in a new thread, and exceptions will be logged but not propagated
* to the console.
*
* </pre>
*/
public class ClientAppConfig {
/** wait 2 minutes before starting up client apps */
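An illustrative clients.config entry following the format documented above (the class name, app name, and arguments are placeholders, not taken from a real install):

    # hypothetical example app, started 2 minutes after boot
    clientApp.0.main=org.example.ExampleApp
    clientApp.0.name=Example App
    clientApp.0.args=--listen 7777
    clientApp.0.delay=120
    clientApp.0.startOnLoad=true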

View File

@ -11,7 +11,7 @@ import net.i2p.util.I2PThread;
import net.i2p.util.Log;
/**
* Run any client applications specified in the router.config. If any clientApp
* Run any client applications specified in clients.config. If any clientApp
* contains the config property ".onBoot=true" it'll be launched immediately, otherwise
* it'll get queued up for starting 2 minutes later.
*
@ -40,7 +40,7 @@ public class LoadClientAppsJob extends JobImpl {
if (app.disabled)
continue;
String argVal[] = parseArgs(app.args);
if (app.delay == 0) {
if (app.delay <= 0) {
// run this guy now
runClient(app.className, app.clientName, argVal, _log);
} else {
@ -118,6 +118,36 @@ public class LoadClientAppsJob extends JobImpl {
return rv;
}
/**
* Use to test if the class is present,
* to propagate an error back to the user,
* since runClient() runs in a separate thread.
*
* @since 0.7.13
*/
public static void testClient(String className) throws ClassNotFoundException {
Class.forName(className);
}
/**
* Run client in this thread.
*
* @throws just about anything, caller would be wise to catch Throwable
* @since 0.7.13
*/
public static void runClientInline(String className, String clientName, String args[], Log log) throws Exception {
if (log.shouldLog(Log.INFO))
log.info("Loading up the client application " + clientName + ": " + className + " " + Arrays.toString(args));
if (args == null)
args = new String[0];
Class cls = Class.forName(className);
Method method = cls.getMethod("main", new Class[] { String[].class });
method.invoke(cls, new Object[] { args });
}
/**
* Run client in a new thread.
*/
public static void runClient(String className, String clientName, String args[], Log log) {
if (log.shouldLog(Log.INFO))
log.info("Loading up the client application " + clientName + ": " + className + " " + Arrays.toString(args));

View File

@ -116,7 +116,7 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
return sum * 1000 / frameSize;
}
public List getBids(OutNetMessage msg) {
public List<TransportBid> getBids(OutNetMessage msg) {
return _manager.getBids(msg);
}
public TransportBid getBid(OutNetMessage msg) {
@ -174,8 +174,8 @@ public class CommSystemFacadeImpl extends CommSystemFacade {
}
@Override
public Set createAddresses() {
Map addresses = null;
public Set<RouterAddress> createAddresses() {
Map<String, RouterAddress> addresses = null;
boolean newCreated = false;
if (_manager != null) {

View File

@ -4,24 +4,45 @@ import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import net.i2p.I2PAppContext;
import net.i2p.util.I2PThread;
import net.i2p.util.Log;
/**
* Concurrent plan:
*
* It's difficult to get rid of the locks on _pendingInboundRequests
* since locked_satisfyInboundAvailable() leaves Requests on the head
* of the queue.
*
* When we go to Java 6, we can convert from a locked ArrayList to
* a LinkedBlockingDeque, where locked_sIA will poll() from the
* head of the queue, and if the request is not fully satisfied,
* offerFirst() (i.e. push) it back on the head.
*
* Ditto outbound of course.
*
* In the meantime, for Java 5, we have lockless 'shortcut'
* methods for the common case where we are under the bandwidth limits.
* And the volatile counters are now AtomicIntegers / AtomicLongs.
*
*/
public class FIFOBandwidthLimiter {
private Log _log;
private I2PAppContext _context;
private final List _pendingInboundRequests;
private final List _pendingOutboundRequests;
private final List<Request> _pendingInboundRequests;
private final List<Request> _pendingOutboundRequests;
/** how many bytes we can consume for inbound transmission immediately */
private volatile int _availableInbound;
private AtomicInteger _availableInbound = new AtomicInteger();
/** how many bytes we can consume for outbound transmission immediately */
private volatile int _availableOutbound;
private AtomicInteger _availableOutbound = new AtomicInteger();
/** how many bytes we can queue up for bursting */
private volatile int _unavailableInboundBurst;
private AtomicInteger _unavailableInboundBurst = new AtomicInteger();
/** how many bytes we can queue up for bursting */
private volatile int _unavailableOutboundBurst;
private AtomicInteger _unavailableOutboundBurst = new AtomicInteger();
/** how large _unavailableInbound can get */
private int _maxInboundBurst;
/** how large _unavailableOutbound can get */
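A rough sketch of the Java 6 plan described in the class comment above (illustrative only, not part of this changeset; allocate() is a hypothetical helper standing in for the satisfy logic):

    // Sketch: a LinkedBlockingDeque would replace the locked ArrayList,
    // letting the satisfy loop pop the head and push back a partial request.
    private final java.util.concurrent.LinkedBlockingDeque<Request> pendingInbound =
        new java.util.concurrent.LinkedBlockingDeque<Request>();

    private void satisfyInboundSketch() {
        Request req;
        while ((req = pendingInbound.poll()) != null) {   // poll() from the head
            if (!allocate(req)) {                         // hypothetical: grant what we can
                pendingInbound.offerFirst(req);           // not fully satisfied: push it back
                break;
            }
        }
    }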
@ -35,13 +56,13 @@ public class FIFOBandwidthLimiter {
/** shortcut of whether our inbound rate is unlimited */
private boolean _inboundUnlimited;
/** lifetime counter of bytes received */
private volatile long _totalAllocatedInboundBytes;
private AtomicLong _totalAllocatedInboundBytes = new AtomicLong();
/** lifetime counter of bytes sent */
private volatile long _totalAllocatedOutboundBytes;
private AtomicLong _totalAllocatedOutboundBytes = new AtomicLong();
/** lifetime counter of tokens available for use but exceeded our maxInboundBurst size */
private volatile long _totalWastedInboundBytes;
private AtomicLong _totalWastedInboundBytes = new AtomicLong();
/** lifetime counter of tokens available for use but exceeded our maxOutboundBurst size */
private volatile long _totalWastedOutboundBytes;
private AtomicLong _totalWastedOutboundBytes = new AtomicLong();
private FIFOBandwidthRefiller _refiller;
private long _lastTotalSent;
@ -75,8 +96,8 @@ public class FIFOBandwidthLimiter {
}
_pendingInboundRequests = new ArrayList(16);
_pendingOutboundRequests = new ArrayList(16);
_lastTotalSent = _totalAllocatedOutboundBytes;
_lastTotalReceived = _totalAllocatedInboundBytes;
_lastTotalSent = _totalAllocatedOutboundBytes.get();
_lastTotalReceived = _totalAllocatedInboundBytes.get();
_sendBps = 0;
_recvBps = 0;
_lastStatsUpdated = now();
@ -90,10 +111,10 @@ public class FIFOBandwidthLimiter {
//public long getAvailableInboundBytes() { return _availableInboundBytes; }
//public long getAvailableOutboundBytes() { return _availableOutboundBytes; }
public long getTotalAllocatedInboundBytes() { return _totalAllocatedInboundBytes; }
public long getTotalAllocatedOutboundBytes() { return _totalAllocatedOutboundBytes; }
public long getTotalWastedInboundBytes() { return _totalWastedInboundBytes; }
public long getTotalWastedOutboundBytes() { return _totalWastedOutboundBytes; }
public long getTotalAllocatedInboundBytes() { return _totalAllocatedInboundBytes.get(); }
public long getTotalAllocatedOutboundBytes() { return _totalAllocatedOutboundBytes.get(); }
public long getTotalWastedInboundBytes() { return _totalWastedInboundBytes.get(); }
public long getTotalWastedOutboundBytes() { return _totalWastedOutboundBytes.get(); }
//public long getMaxInboundBytes() { return _maxInboundBytes; }
//public void setMaxInboundBytes(int numBytes) { _maxInboundBytes = numBytes; }
//public long getMaxOutboundBytes() { return _maxOutboundBytes; }
@ -116,14 +137,14 @@ public class FIFOBandwidthLimiter {
public void reinitialize() {
_pendingInboundRequests.clear();
_pendingOutboundRequests.clear();
_availableInbound = 0;
_availableOutbound = 0;
_availableInbound.set(0);
_availableOutbound.set(0);
_maxInbound = 0;
_maxOutbound = 0;
_maxInboundBurst = 0;
_maxOutboundBurst = 0;
_unavailableInboundBurst = 0;
_unavailableOutboundBurst = 0;
_unavailableInboundBurst.set(0);
_unavailableOutboundBurst.set(0);
_inboundUnlimited = false;
_outboundUnlimited = false;
_refiller.reinitialize();
@ -132,58 +153,66 @@ public class FIFOBandwidthLimiter {
public Request createRequest() { return new SimpleRequest(); }
/**
* Request some bytes, blocking until they become available
*
* Request some bytes. Does not block.
*/
public Request requestInbound(int bytesIn, String purpose) { return requestInbound(bytesIn, purpose, null, null); }
public Request requestInbound(int bytesIn, String purpose, CompleteListener lsnr, Object attachment) {
if (_inboundUnlimited) {
_totalAllocatedInboundBytes += bytesIn;
public Request requestInbound(int bytesIn, String purpose) {
// try to satisfy without grabbing the global lock
if (shortcutSatisfyInboundRequest(bytesIn))
return _noop;
}
return requestInbound(bytesIn, purpose, null, null);
}
public Request requestInbound(int bytesIn, String purpose, CompleteListener lsnr, Object attachment) {
SimpleRequest req = new SimpleRequest(bytesIn, 0, purpose, lsnr, attachment);
requestInbound(req, bytesIn, purpose);
return req;
}
public void requestInbound(Request req, int bytesIn, String purpose) {
req.init(bytesIn, 0, purpose);
if (false) { ((SimpleRequest)req).allocateAll(); return; }
int pending = 0;
/**
* The transports don't use this any more, so make it private
* and a SimpleRequest instead of a Request,
* so there's no more casting.
*/
private void requestInbound(SimpleRequest req, int bytesIn, String purpose) {
// don't init twice - uncomment if we make public again?
//req.init(bytesIn, 0, purpose);
int pending;
synchronized (_pendingInboundRequests) {
pending = _pendingInboundRequests.size();
_pendingInboundRequests.add(req);
}
satisfyInboundRequests(((SimpleRequest)req).satisfiedBuffer);
((SimpleRequest)req).satisfiedBuffer.clear();
satisfyInboundRequests(req.satisfiedBuffer);
req.satisfiedBuffer.clear();
if (pending > 0)
_context.statManager().addRateData("bwLimiter.pendingInboundRequests", pending, pending);
}
/**
* Request some bytes, blocking until they become available
*
*/
public Request requestOutbound(int bytesOut, String purpose) { return requestOutbound(bytesOut, purpose, null, null); }
public Request requestOutbound(int bytesOut, String purpose, CompleteListener lsnr, Object attachment) {
if (_outboundUnlimited) {
_totalAllocatedOutboundBytes += bytesOut;
return _noop;
}
/**
* Request some bytes. Does not block.
*/
public Request requestOutbound(int bytesOut, String purpose) {
// try to satisfy without grabbing the global lock
if (shortcutSatisfyOutboundRequest(bytesOut))
return _noop;
return requestOutbound(bytesOut, purpose, null, null);
}
public Request requestOutbound(int bytesOut, String purpose, CompleteListener lsnr, Object attachment) {
SimpleRequest req = new SimpleRequest(0, bytesOut, purpose, lsnr, attachment);
requestOutbound(req, bytesOut, purpose);
return req;
}
public void requestOutbound(Request req, int bytesOut, String purpose) {
req.init(0, bytesOut, purpose);
if (false) { ((SimpleRequest)req).allocateAll(); return; }
int pending = 0;
private void requestOutbound(SimpleRequest req, int bytesOut, String purpose) {
// don't init twice - uncomment if we make public again?
//req.init(0, bytesOut, purpose);
int pending;
synchronized (_pendingOutboundRequests) {
pending = _pendingOutboundRequests.size();
_pendingOutboundRequests.add(req);
}
satisfyOutboundRequests(((SimpleRequest)req).satisfiedBuffer);
((SimpleRequest)req).satisfiedBuffer.clear();
satisfyOutboundRequests(req.satisfiedBuffer);
req.satisfiedBuffer.clear();
if (pending > 0)
_context.statManager().addRateData("bwLimiter.pendingOutboundRequests", pending, pending);
}
@ -200,7 +229,7 @@ public class FIFOBandwidthLimiter {
void setOutboundBurstBytes(int bytes) { _maxOutboundBurst = bytes; }
StringBuilder getStatus() {
StringBuilder rv = new StringBuilder(64);
StringBuilder rv = new StringBuilder(128);
rv.append("Available: ").append(_availableInbound).append('/').append(_availableOutbound).append(' ');
rv.append("Max: ").append(_maxInbound).append('/').append(_maxOutbound).append(' ');
rv.append("Burst: ").append(_unavailableInboundBurst).append('/').append(_unavailableOutboundBurst).append(' ');
@ -215,64 +244,72 @@ public class FIFOBandwidthLimiter {
* @param maxBurstIn allow up to this many bytes in from the burst section for this time period (may be negative)
* @param maxBurstOut allow up to this many bytes out from the burst section for this time period (may be negative)
*/
final void refillBandwidthQueues(List buf, long bytesInbound, long bytesOutbound, long maxBurstIn, long maxBurstOut) {
final void refillBandwidthQueues(List<Request> buf, long bytesInbound, long bytesOutbound, long maxBurstIn, long maxBurstOut) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Refilling the queues with " + bytesInbound + "/" + bytesOutbound + ": " + getStatus().toString());
_availableInbound += bytesInbound;
_availableOutbound += bytesOutbound;
if (_availableInbound > _maxInbound) {
// Take some care throughout to minimize accesses to the atomics,
// both for efficiency and to not let strange things happen if
// it changes out from under us
// This never had locks before concurrent, anyway
int avi = _availableInbound.addAndGet((int) bytesInbound);
if (avi > _maxInbound) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("available inbound (" + _availableInbound + ") exceeds our inbound burst (" + _maxInbound + "), so no supplement");
_unavailableInboundBurst += _availableInbound - _maxInbound;
_availableInbound = _maxInbound;
if (_unavailableInboundBurst > _maxInboundBurst) {
_totalWastedInboundBytes += _unavailableInboundBurst - _maxInboundBurst;
_unavailableInboundBurst = _maxInboundBurst;
_log.debug("available inbound (" + avi + ") exceeds our inbound burst (" + _maxInbound + "), so no supplement");
int uib = _unavailableInboundBurst.addAndGet(avi - _maxInbound);
_availableInbound.set(_maxInbound);
if (uib > _maxInboundBurst) {
_totalWastedInboundBytes.addAndGet(uib - _maxInboundBurst);
_unavailableInboundBurst.set(_maxInboundBurst);
}
} else {
// try to pull in up to 1/10th of the burst rate, since we refill every 100ms
int want = (int)maxBurstIn;
if (want > (_maxInbound - _availableInbound))
want = _maxInbound - _availableInbound;
if (want > (_maxInbound - avi))
want = _maxInbound - avi;
if (_log.shouldLog(Log.DEBUG))
_log.debug("want to pull " + want + " from the inbound burst (" + _unavailableInboundBurst + ") to supplement " + _availableInbound + " (max: " + _maxInbound + ")");
_log.debug("want to pull " + want + " from the inbound burst (" + _unavailableInboundBurst + ") to supplement " + avi + " (max: " + _maxInbound + ")");
if (want > 0) {
if (want <= _unavailableInboundBurst) {
_availableInbound += want;
_unavailableInboundBurst -= want;
int uib = _unavailableInboundBurst.get();
if (want <= uib) {
_availableInbound.addAndGet(want);
_unavailableInboundBurst.addAndGet(0 - want);
} else {
_availableInbound += _unavailableInboundBurst;
_unavailableInboundBurst = 0;
_availableInbound.addAndGet(uib);
_unavailableInboundBurst.set(0);
}
}
}
if (_availableOutbound > _maxOutbound) {
int avo = _availableOutbound.addAndGet((int) bytesOutbound);
if (avo > _maxOutbound) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("available outbound (" + _availableOutbound + ") exceeds our outbound burst (" + _maxOutbound + "), so no supplement");
_unavailableOutboundBurst += _availableOutbound - _maxOutbound;
_availableOutbound = _maxOutbound;
if (_unavailableOutboundBurst > _maxOutboundBurst) {
_totalWastedOutboundBytes += _unavailableOutboundBurst - _maxOutboundBurst;
_unavailableOutboundBurst = _maxOutboundBurst;
_log.debug("available outbound (" + avo + ") exceeds our outbound burst (" + _maxOutbound + "), so no supplement");
int uob = _unavailableOutboundBurst.addAndGet(avo - _maxOutbound);
_availableOutbound.set(_maxOutbound);
if (uob > _maxOutboundBurst) {
_totalWastedOutboundBytes.getAndAdd(uob - _maxOutboundBurst);
_unavailableOutboundBurst.set(_maxOutboundBurst);
}
} else {
// try to pull in up to 1/10th of the burst rate, since we refill every 100ms
int want = (int)maxBurstOut;
if (want > (_maxOutbound - _availableOutbound))
want = _maxOutbound - _availableOutbound;
if (want > (_maxOutbound - avo))
want = _maxOutbound - avo;
if (_log.shouldLog(Log.DEBUG))
_log.debug("want to pull " + want + " from the outbound burst (" + _unavailableOutboundBurst + ") to supplement " + _availableOutbound + " (max: " + _maxOutbound + ")");
_log.debug("want to pull " + want + " from the outbound burst (" + _unavailableOutboundBurst + ") to supplement " + avo + " (max: " + _maxOutbound + ")");
if (want > 0) {
if (want <= _unavailableOutboundBurst) {
_availableOutbound += want;
_unavailableOutboundBurst -= want;
int uob = _unavailableOutboundBurst.get();
if (want <= uob) {
_availableOutbound.addAndGet(want);
_unavailableOutboundBurst.addAndGet(0 - want);
} else {
_availableOutbound += _unavailableOutboundBurst;
_unavailableOutboundBurst = 0;
_availableOutbound.addAndGet(uob);
_unavailableOutboundBurst.set(0);
}
}
}
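For concreteness on the "1/10th of the burst rate" comments above: with the 100 ms refill period, a burst rate of 50 KBytes/s corresponds to a maxBurstIn or maxBurstOut of roughly 5 KB per call, which is the most that can move from the burst bucket into the available pool on a single pass.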
@ -286,8 +323,8 @@ public class FIFOBandwidthLimiter {
long time = now - _lastStatsUpdated;
// If at least one second has passed
if (time >= 1000) {
long totS = _totalAllocatedOutboundBytes;
long totR = _totalAllocatedInboundBytes;
long totS = _totalAllocatedOutboundBytes.get();
long totR = _totalAllocatedInboundBytes.get();
long sent = totS - _lastTotalSent; // How much we sent meanwhile
long recv = totR - _lastTotalReceived; // How much we received meanwhile
_lastTotalSent = totS;
@ -337,20 +374,22 @@ public class FIFOBandwidthLimiter {
/**
* Go through the queue, satisfying as many requests as possible (notifying
* each one satisfied that the request has been granted).
*
* @param buffer returned with the satisfied outbound requests only
*/
private final void satisfyRequests(List buffer) {
private final void satisfyRequests(List<Request> buffer) {
buffer.clear();
satisfyInboundRequests(buffer);
buffer.clear();
satisfyOutboundRequests(buffer);
}
private final void satisfyInboundRequests(List satisfied) {
private final void satisfyInboundRequests(List<Request> satisfied) {
synchronized (_pendingInboundRequests) {
if (_inboundUnlimited) {
locked_satisfyInboundUnlimited(satisfied);
} else {
if (_availableInbound > 0) {
if (_availableInbound.get() > 0) {
locked_satisfyInboundAvailable(satisfied);
} else {
// no bandwidth available
@ -370,6 +409,7 @@ public class FIFOBandwidthLimiter {
}
}
/** called from debug logging only */
private long locked_getLongestInboundWait() {
long start = -1;
for (int i = 0; i < _pendingInboundRequests.size(); i++) {
@ -382,6 +422,8 @@ public class FIFOBandwidthLimiter {
else
return now() - start;
}
/** called from debug logging only */
private long locked_getLongestOutboundWait() {
long start = -1;
for (int i = 0; i < _pendingOutboundRequests.size(); i++) {
@ -400,11 +442,11 @@ public class FIFOBandwidthLimiter {
* There are no limits, so just give every inbound request whatever they want
*
*/
private final void locked_satisfyInboundUnlimited(List satisfied) {
private final void locked_satisfyInboundUnlimited(List<Request> satisfied) {
while (_pendingInboundRequests.size() > 0) {
SimpleRequest req = (SimpleRequest)_pendingInboundRequests.remove(0);
int allocated = req.getPendingInboundRequested();
_totalAllocatedInboundBytes += allocated;
_totalAllocatedInboundBytes.addAndGet(allocated);
req.allocateBytes(allocated, 0);
satisfied.add(req);
long waited = now() - req.getRequestTime();
@ -425,9 +467,9 @@ public class FIFOBandwidthLimiter {
*
* @return list of requests that were completely satisfied
*/
private final void locked_satisfyInboundAvailable(List satisfied) {
private final void locked_satisfyInboundAvailable(List<Request> satisfied) {
for (int i = 0; i < _pendingInboundRequests.size(); i++) {
if (_availableInbound <= 0) break;
if (_availableInbound.get() <= 0) break;
SimpleRequest req = (SimpleRequest)_pendingInboundRequests.get(i);
long waited = now() - req.getRequestTime();
if (req.getAborted()) {
@ -452,13 +494,14 @@ public class FIFOBandwidthLimiter {
}
// ok, they are really waiting for us to give them stuff
int requested = req.getPendingInboundRequested();
int allocated = 0;
if (_availableInbound > requested)
int avi = _availableInbound.get();
int allocated;
if (avi >= requested)
allocated = requested;
else
allocated = _availableInbound;
_availableInbound -= allocated;
_totalAllocatedInboundBytes += allocated;
allocated = avi;
_availableInbound.addAndGet(0 - allocated);
_totalAllocatedInboundBytes.addAndGet(allocated);
req.allocateBytes(allocated, 0);
satisfied.add(req);
if (req.getPendingInboundRequested() > 0) {
@ -485,12 +528,12 @@ public class FIFOBandwidthLimiter {
}
}
private final void satisfyOutboundRequests(List satisfied) {
private final void satisfyOutboundRequests(List<Request> satisfied) {
synchronized (_pendingOutboundRequests) {
if (_outboundUnlimited) {
locked_satisfyOutboundUnlimited(satisfied);
} else {
if (_availableOutbound > 0) {
if (_availableOutbound.get() > 0) {
locked_satisfyOutboundAvailable(satisfied);
} else {
// no bandwidth available
@ -514,11 +557,11 @@ public class FIFOBandwidthLimiter {
* There are no limits, so just give every outbound request whatever they want
*
*/
private final void locked_satisfyOutboundUnlimited(List satisfied) {
private final void locked_satisfyOutboundUnlimited(List<Request> satisfied) {
while (_pendingOutboundRequests.size() > 0) {
SimpleRequest req = (SimpleRequest)_pendingOutboundRequests.remove(0);
int allocated = req.getPendingOutboundRequested();
_totalAllocatedOutboundBytes += allocated;
_totalAllocatedOutboundBytes.addAndGet(allocated);
req.allocateBytes(0, allocated);
satisfied.add(req);
long waited = now() - req.getRequestTime();
@ -540,9 +583,9 @@ public class FIFOBandwidthLimiter {
*
* @return list of requests that were completely satisfied
*/
private final void locked_satisfyOutboundAvailable(List satisfied) {
private final void locked_satisfyOutboundAvailable(List<Request> satisfied) {
for (int i = 0; i < _pendingOutboundRequests.size(); i++) {
if (_availableOutbound <= 0) break;
if (_availableOutbound.get() <= 0) break;
SimpleRequest req = (SimpleRequest)_pendingOutboundRequests.get(i);
long waited = now() - req.getRequestTime();
if (req.getAborted()) {
@ -567,13 +610,14 @@ public class FIFOBandwidthLimiter {
}
// ok, they are really waiting for us to give them stuff
int requested = req.getPendingOutboundRequested();
int allocated = 0;
if (_availableOutbound > requested)
int avo = _availableOutbound.get();
int allocated;
if (avo >= requested)
allocated = requested;
else
allocated = _availableOutbound;
_availableOutbound -= allocated;
_totalAllocatedOutboundBytes += allocated;
allocated = avo;
_availableOutbound.addAndGet(0 - allocated);
_totalAllocatedOutboundBytes.addAndGet(allocated);
req.allocateBytes(0, allocated);
satisfied.add(req);
if (req.getPendingOutboundRequested() > 0) {
@ -618,6 +662,50 @@ public class FIFOBandwidthLimiter {
}
}
/**
* Lockless total satisfaction,
* at some minor risk of exceeding the limits
* and driving the available counter below zero
*
* @param requested number of bytes
* @return satisfaction
* @since 0.7.13
*/
private boolean shortcutSatisfyInboundRequest(int requested) {
boolean rv = _inboundUnlimited ||
(_pendingInboundRequests.isEmpty() &&
_availableInbound.get() >= requested);
if (rv) {
_availableInbound.addAndGet(0 - requested);
_totalAllocatedInboundBytes.addAndGet(requested);
}
if (_log.shouldLog(Log.INFO))
_log.info("IB shortcut for " + requested + "B? " + rv);
return rv;
}
/**
* Lockless total satisfaction,
* at some minor risk of exceeding the limits
* and driving the available counter below zero
*
* @param requested number of bytes
* @return satisfaction
* @since 0.7.13
*/
private boolean shortcutSatisfyOutboundRequest(int requested) {
boolean rv = _outboundUnlimited ||
(_pendingOutboundRequests.isEmpty() &&
_availableOutbound.get() >= requested);
if (rv) {
_availableOutbound.addAndGet(0 - requested);
_totalAllocatedOutboundBytes.addAndGet(requested);
}
if (_log.shouldLog(Log.INFO))
_log.info("OB shortcut for " + requested + "B? " + rv);
return rv;
}
/** @deprecated not worth translating */
public void renderStatusHTML(Writer out) throws IOException {
/*******
@ -665,7 +753,7 @@ public class FIFOBandwidthLimiter {
private int _allocationsSinceWait;
private boolean _aborted;
private boolean _waited;
List satisfiedBuffer;
List<Request> satisfiedBuffer;
private CompleteListener _lsnr;
private Object _attachment;
@ -741,7 +829,6 @@ public class FIFOBandwidthLimiter {
void allocateAll() {
_inAllocated = _inTotal;
_outAllocated = _outTotal;
_outAllocated = _outTotal;
if (_lsnr == null)
_allocationsSinceWait++;
if (_log.shouldLog(Log.DEBUG)) _log.debug("allocate all");
@ -778,6 +865,13 @@ public class FIFOBandwidthLimiter {
public String toString() { return getRequestName(); }
}
/**
* This is somewhat complicated by having both
* inbound and outbound in a single request.
* Making a request unidirectional would
* be a good simplification.
* But NTCP would have to be changed as it puts them on one queue.
*/
public interface Request {
/** describe this particular request */
public String getRequestName();
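A minimal sketch of the unidirectional Request simplification suggested in the comment above (purely illustrative, not part of this changeset; the method names are invented, and NTCP's single outbound queue would still need rework):

    // Illustrative only: one direction per request, so callers never pass a
    // zero byte count for the direction they don't care about.
    public interface DirectionalRequest {
        /** describe this particular request */
        public String getRequestName();
        /** bytes still wanted in this request's single direction */
        public int getPendingRequested();
        /** true once the limiter has granted everything requested */
        public boolean isComplete();
        /** give up waiting and release anything already granted */
        public void abort();
    }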

View File

@ -71,7 +71,7 @@ public class FIFOBandwidthRefiller implements Runnable {
public void run() {
// bootstrap 'em with nothing
_lastRefillTime = _limiter.now();
List buffer = new ArrayList(2);
List<FIFOBandwidthLimiter.Request> buffer = new ArrayList(2);
while (true) {
long now = _limiter.now();
if (now >= _lastCheckConfigTime + _configCheckPeriodMs) {
@ -95,7 +95,7 @@ public class FIFOBandwidthRefiller implements Runnable {
_lastCheckConfigTime = _lastRefillTime;
}
private boolean updateQueues(List buffer, long now) {
private boolean updateQueues(List<FIFOBandwidthLimiter.Request> buffer, long now) {
long numMs = (now - _lastRefillTime);
if (_log.shouldLog(Log.INFO))
_log.info("Updating bandwidth after " + numMs + " (status: " + _limiter.getStatus().toString()

View File

@ -421,8 +421,8 @@ public abstract class TransportImpl implements Transport {
}
}
/** To protect dev anonymity. Set to true after 0.7.12 is out */
public static final boolean ADJUST_COST = !RouterVersion.VERSION.equals("0.7.11");
/** Do we increase the advertised cost when approaching conn limits? */
public static final boolean ADJUST_COST = true;
/** What addresses are we currently listening to? */
public RouterAddress getCurrentAddress() {

View File

@ -140,7 +140,7 @@ public class TransportManager implements TransportEventListener {
configTransports();
_log.debug("Starting up the transport manager");
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
Transport t = _transports.get(i);
RouterAddress addr = t.startListening();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Transport " + i + " (" + t.getStyle() + ") started");
@ -161,14 +161,14 @@ public class TransportManager implements TransportEventListener {
if (_upnpManager != null)
_upnpManager.stop();
for (int i = 0; i < _transports.size(); i++) {
((Transport)_transports.get(i)).stopListening();
_transports.get(i).stopListening();
}
_transports.clear();
}
public Transport getTransport(String style) {
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
Transport t = _transports.get(i);
if(style.equals(t.getStyle()))
return t;
}
@ -189,7 +189,7 @@ public class TransportManager implements TransportEventListener {
public int countActivePeers() {
int peers = 0;
for (int i = 0; i < _transports.size(); i++) {
peers += ((Transport)_transports.get(i)).countActivePeers();
peers += _transports.get(i).countActivePeers();
}
return peers;
}
@ -197,7 +197,7 @@ public class TransportManager implements TransportEventListener {
public int countActiveSendPeers() {
int peers = 0;
for (int i = 0; i < _transports.size(); i++) {
peers += ((Transport)_transports.get(i)).countActiveSendPeers();
peers += _transports.get(i).countActiveSendPeers();
}
return peers;
}
@ -210,7 +210,7 @@ public class TransportManager implements TransportEventListener {
*/
public boolean haveOutboundCapacity(int pct) {
for (int i = 0; i < _transports.size(); i++) {
if (((Transport)_transports.get(i)).haveCapacity(pct))
if (_transports.get(i).haveCapacity(pct))
return true;
}
return false;
@ -225,7 +225,7 @@ public class TransportManager implements TransportEventListener {
if (_transports.size() <= 0)
return false;
for (int i = 0; i < _transports.size(); i++) {
if (!((Transport)_transports.get(i)).haveCapacity(HIGH_CAPACITY_PCT))
if (!_transports.get(i).haveCapacity(HIGH_CAPACITY_PCT))
return false;
}
return true;
@ -253,7 +253,7 @@ public class TransportManager implements TransportEventListener {
public Vector getClockSkews() {
Vector skews = new Vector();
for (int i = 0; i < _transports.size(); i++) {
Vector tempSkews = ((Transport)_transports.get(i)).getClockSkews();
Vector tempSkews = _transports.get(i).getClockSkews();
if ((tempSkews == null) || (tempSkews.size() <= 0)) continue;
skews.addAll(tempSkews);
}
@ -275,12 +275,12 @@ public class TransportManager implements TransportEventListener {
public void recheckReachability() {
for (int i = 0; i < _transports.size(); i++)
((Transport)_transports.get(i)).recheckReachability();
_transports.get(i).recheckReachability();
}
public boolean isBacklogged(Hash dest) {
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
Transport t = _transports.get(i);
if (t.isBacklogged(dest))
return true;
}
@ -289,7 +289,7 @@ public class TransportManager implements TransportEventListener {
public boolean isEstablished(Hash dest) {
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
Transport t = _transports.get(i);
if (t.isEstablished(dest))
return true;
}
@ -303,7 +303,7 @@ public class TransportManager implements TransportEventListener {
*/
public boolean wasUnreachable(Hash dest) {
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
Transport t = _transports.get(i);
if (!t.wasUnreachable(dest))
return false;
}
@ -371,22 +371,22 @@ public class TransportManager implements TransportEventListener {
}
public TransportBid getBid(OutNetMessage msg) {
List bids = getBids(msg);
List<TransportBid> bids = getBids(msg);
if ( (bids == null) || (bids.size() <= 0) )
return null;
else
return (TransportBid)bids.get(0);
return bids.get(0);
}
public List getBids(OutNetMessage msg) {
public List<TransportBid> getBids(OutNetMessage msg) {
if (msg == null)
throw new IllegalArgumentException("Null message? no bidding on a null outNetMessage!");
if (_context.router().getRouterInfo().equals(msg.getTarget()))
throw new IllegalArgumentException("WTF, bids for a message bound to ourselves?");
List rv = new ArrayList(_transports.size());
List<TransportBid> rv = new ArrayList(_transports.size());
Set failedTransports = msg.getFailedTransports();
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
Transport t = _transports.get(i);
if (failedTransports.contains(t.getStyle())) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Skipping transport " + t.getStyle() + " as it already failed");
@ -415,7 +415,7 @@ public class TransportManager implements TransportEventListener {
Set failedTransports = msg.getFailedTransports();
TransportBid rv = null;
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
Transport t = _transports.get(i);
if (t.isUnreachable(peer)) {
unreachableTransports++;
// this keeps GetBids() from shitlisting for "no common transports"
@ -482,7 +482,7 @@ public class TransportManager implements TransportEventListener {
public List getMostRecentErrorMessages() {
List rv = new ArrayList(16);
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
Transport t = _transports.get(i);
rv.addAll(t.getMostRecentErrorMessages());
}
return rv;
@ -491,7 +491,7 @@ public class TransportManager implements TransportEventListener {
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException {
TreeMap transports = new TreeMap();
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
Transport t = _transports.get(i);
transports.put(t.getStyle(), t);
}
for (Iterator iter = transports.values().iterator(); iter.hasNext(); ) {
@ -501,7 +501,7 @@ public class TransportManager implements TransportEventListener {
StringBuilder buf = new StringBuilder(4*1024);
buf.append("<h3>Router Transport Addresses</h3><pre>\n");
for (int i = 0; i < _transports.size(); i++) {
Transport t = (Transport)_transports.get(i);
Transport t = _transports.get(i);
if (t.getCurrentAddress() != null)
buf.append(t.getCurrentAddress()).append("\n\n");
else

View File

@ -302,7 +302,7 @@ public class EventPumper implements Runnable {
public void wantsWrite(NTCPConnection con, byte data[]) {
ByteBuffer buf = ByteBuffer.wrap(data);
FIFOBandwidthLimiter.Request req = _context.bandwidthLimiter().requestOutbound(data.length, "NTCP write", null, null);//con, buf);
FIFOBandwidthLimiter.Request req = _context.bandwidthLimiter().requestOutbound(data.length, "NTCP write");//con, buf);
if (req.getPendingOutboundRequested() > 0) {
if (_log.shouldLog(Log.INFO))
_log.info("queued write on " + con + " for " + data.length);
@ -471,7 +471,7 @@ public class EventPumper implements Runnable {
buf.get(data);
releaseBuf(buf);
ByteBuffer rbuf = ByteBuffer.wrap(data);
FIFOBandwidthLimiter.Request req = _context.bandwidthLimiter().requestInbound(read, "NTCP read", null, null); //con, buf);
FIFOBandwidthLimiter.Request req = _context.bandwidthLimiter().requestInbound(read, "NTCP read"); //con, buf);
if (req.getPendingInboundRequested() > 0) {
key.interestOps(key.interestOps() & ~SelectionKey.OP_READ);
if (_log.shouldLog(Log.DEBUG))

View File

@ -62,7 +62,7 @@ public class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
* and already cleared through the bandwidth limiter.
*/
private final LinkedBlockingQueue<ByteBuffer> _writeBufs;
/** Todo: This is only so we can abort() them when we close() ??? */
/** Requests that were not granted immediately */
private final Set<FIFOBandwidthLimiter.Request> _bwRequests;
private boolean _established;
private long _establishedOn;

View File

@ -1,7 +1,10 @@
package net.i2p.router.transport.udp;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.router.RouterContext;
import net.i2p.util.I2PThread;
@ -18,8 +21,9 @@ public class ACKSender implements Runnable {
private UDPTransport _transport;
private PacketBuilder _builder;
/** list of peers (PeerState) who we have received data from but not yet ACKed to */
private final List _peersToACK;
private final BlockingQueue<PeerState> _peersToACK;
private boolean _alive;
private static final long POISON_PS = -9999999999l;
/** how frequently do we want to send ACKs to a peer? */
static final int ACK_FREQUENCY = 500;
@ -28,7 +32,7 @@ public class ACKSender implements Runnable {
_context = ctx;
_log = ctx.logManager().getLog(ACKSender.class);
_transport = transport;
_peersToACK = new ArrayList(4);
_peersToACK = new LinkedBlockingQueue();
_builder = new PacketBuilder(_context, transport);
_alive = true;
_context.statManager().createRateStat("udp.sendACKCount", "how many ack messages were sent to a peer", "udp", UDPTransport.RATES);
@ -37,27 +41,34 @@ public class ACKSender implements Runnable {
_context.statManager().createRateStat("udp.abortACK", "How often do we schedule up an ACK send only to find it had already been sent (through piggyback)?", "udp", UDPTransport.RATES);
}
/**
* Add to the queue.
* For speed, don't check for duplicates here.
* The runner will remove them in its own thread.
*/
public void ackPeer(PeerState peer) {
synchronized (_peersToACK) {
if (!_peersToACK.contains(peer))
_peersToACK.add(peer);
_peersToACK.notifyAll();
}
if (_alive)
_peersToACK.offer(peer);
}
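A minimal sketch of the queue-then-deduplicate pattern the comment above describes (hypothetical names, not this class): producers offer() without any duplicate check, and the single consumer drains the queue into a Set so duplicates collapse on its own thread.

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Hypothetical sketch only; the real runner also re-queues peers that are not yet ready to ACK.
class DedupQueueSketch implements Runnable {
    private final BlockingQueue<String> queue = new LinkedBlockingQueue<String>();

    void produce(String item) {
        queue.offer(item);               // cheap, no contains() scan on the producer side
    }

    public void run() {
        Set<String> batch = new HashSet<String>();
        try {
            while (true) {
                batch.add(queue.take()); // block until at least one entry arrives
                queue.drainTo(batch);    // pull the rest; the Set collapses duplicates
                for (String item : batch)
                    handle(item);
                batch.clear();
            }
        } catch (InterruptedException ie) {}
    }

    private void handle(String item) { /* process one deduplicated entry */ }
}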
public void startup() {
_alive = true;
I2PThread t = new I2PThread(this, "UDP ACK sender");
t.setDaemon(true);
_peersToACK.clear();
I2PThread t = new I2PThread(this, "UDP ACK sender", true);
t.start();
}
public void shutdown() {
_alive = false;
synchronized (_peersToACK) {
_peersToACK.clear();
_peersToACK.notifyAll();
PeerState poison = new PeerState(_context, _transport);
poison.setTheyRelayToUsAs(POISON_PS);
_peersToACK.offer(poison);
for (int i = 1; i <= 5 && !_peersToACK.isEmpty(); i++) {
try {
Thread.sleep(i * 50);
} catch (InterruptedException ie) {}
}
_peersToACK.clear();
}
private long ackFrequency(long timeSinceACK, long rtt) {
@ -71,47 +82,89 @@ public class ACKSender implements Runnable {
}
public void run() {
// we use a Set to strip out dups that come in on the Queue
Set<PeerState> notYet = new HashSet();
while (_alive) {
PeerState peer = null;
long now = _context.clock().now();
long now = 0;
long remaining = -1;
try {
synchronized (_peersToACK) {
for (int i = 0; i < _peersToACK.size(); i++) {
PeerState cur = (PeerState)_peersToACK.get(i);
long wanted = cur.getWantedACKSendSince();
long delta = wanted + ackFrequency(now-cur.getLastACKSend(), cur.getRTT()) - now;
if ( ( (wanted > 0) && (delta < 0) ) || (cur.unsentACKThresholdReached()) ) {
_peersToACK.remove(i);
peer = cur;
break;
}
}
if (peer == null) {
if (_peersToACK.size() <= 0)
_peersToACK.wait();
long wanted = 0;
while (_alive) {
// Pull from the queue until we find one ready to ack
// Any that are not ready we will put back on the queue
PeerState cur = null;
try {
if (notYet.isEmpty())
// wait forever
cur = _peersToACK.take();
else
_peersToACK.wait(50);
} else {
remaining = _peersToACK.size();
}
}
} catch (InterruptedException ie) {}
// Don't wait if nothing there, just put everybody back and sleep below
cur = _peersToACK.poll();
} catch (InterruptedException ie) {}
if (cur != null) {
if (cur.getTheyRelayToUsAs() == POISON_PS)
return;
wanted = cur.getWantedACKSendSince();
now = _context.clock().now();
long delta = wanted + ackFrequency(now-cur.getLastACKSend(), cur.getRTT()) - now;
if (wanted <= 0) {
// it got acked by somebody - discard, remove any dups, and go around again
notYet.remove(cur);
} else if ( (delta <= 0) || (cur.unsentACKThresholdReached()) ) {
// found one to ack
peer = cur;
notYet.remove(cur); // in case a dup
try {
// bulk operations may throw an exception
_peersToACK.addAll(notYet);
} catch (Exception e) {}
notYet.clear();
break;
} else {
// not yet, go around again
// moving from the Queue to the Set and then back removes duplicates
boolean added = notYet.add(cur);
if (added && _log.shouldLog(Log.DEBUG))
_log.debug("Pending ACK (delta = " + delta + ") for " + cur);
}
} else if (!notYet.isEmpty()) {
// put them all back and wait a while
try {
// bulk operations may throw an exception
_peersToACK.addAll(notYet);
} catch (Exception e) {}
if (_log.shouldLog(Log.INFO))
_log.info("sleeping, pending size = " + notYet.size());
notYet.clear();
try {
// sleep a little longer than the divided frequency,
// so it will be ready after we circle around a few times
Thread.sleep(5 + (ACK_FREQUENCY / 3));
} catch (InterruptedException ie) {}
} // else go around again where we will wait at take()
} // inner while()
if (peer != null) {
long lastSend = peer.getLastACKSend();
long wanted = peer.getWantedACKSendSince();
List ackBitfields = peer.retrieveACKBitfields(false);
// set above before the break
//long wanted = peer.getWantedACKSendSince();
List<ACKBitfield> ackBitfields = peer.retrieveACKBitfields(false);
if (wanted < 0)
_log.error("wtf, why are we acking something they dont want? remaining=" + remaining + ", peer=" + peer + ", bitfields=" + ackBitfields);
if (wanted < 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("wtf, why are we acking something they dont want? remaining=" + remaining + ", peer=" + peer + ", bitfields=" + ackBitfields);
continue;
}
if ( (ackBitfields != null) && (ackBitfields.size() > 0) ) {
if ( (ackBitfields != null) && (!ackBitfields.isEmpty()) ) {
_context.statManager().addRateData("udp.sendACKCount", ackBitfields.size(), 0);
if (remaining > 0)
_context.statManager().addRateData("udp.sendACKRemaining", remaining, 0);
now = _context.clock().now();
// set above before the break
//now = _context.clock().now();
if (lastSend < 0)
lastSend = now - 1;
_context.statManager().addRateData("udp.ackFrequency", now-lastSend, now-wanted);
@ -119,7 +172,7 @@ public class ACKSender implements Runnable {
UDPPacket ack = _builder.buildACK(peer, ackBitfields);
ack.markType(1);
ack.setFragmentCount(-1);
ack.setMessageType(42);
ack.setMessageType(PacketBuilder.TYPE_ACK);
if (_log.shouldLog(Log.INFO))
_log.info("Sending ACK for " + ackBitfields);

View File

@ -7,6 +7,7 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.crypto.DHSessionKeyBuilder;
import net.i2p.data.Base64;
@ -37,13 +38,13 @@ public class EstablishmentManager {
private UDPTransport _transport;
private PacketBuilder _builder;
/** map of RemoteHostId to InboundEstablishState */
private final Map _inboundStates;
private final ConcurrentHashMap<RemoteHostId, InboundEstablishState> _inboundStates;
/** map of RemoteHostId to OutboundEstablishState */
private final Map _outboundStates;
private final ConcurrentHashMap<RemoteHostId, OutboundEstablishState> _outboundStates;
/** map of RemoteHostId to List of OutNetMessage for messages exceeding capacity */
private final Map _queuedOutbound;
private final ConcurrentHashMap<RemoteHostId, List<OutNetMessage>> _queuedOutbound;
/** map of nonce (Long) to OutboundEstablishState */
private final Map _liveIntroductions;
private final ConcurrentHashMap<Long, OutboundEstablishState> _liveIntroductions;
private boolean _alive;
private final Object _activityLock;
private int _activity;
@ -56,10 +57,10 @@ public class EstablishmentManager {
_log = ctx.logManager().getLog(EstablishmentManager.class);
_transport = transport;
_builder = new PacketBuilder(ctx, transport);
_inboundStates = new HashMap(32);
_outboundStates = new HashMap(32);
_queuedOutbound = new HashMap(32);
_liveIntroductions = new HashMap(32);
_inboundStates = new ConcurrentHashMap();
_outboundStates = new ConcurrentHashMap();
_queuedOutbound = new ConcurrentHashMap();
_liveIntroductions = new ConcurrentHashMap();
_activityLock = new Object();
_context.statManager().createRateStat("udp.inboundEstablishTime", "How long it takes for a new inbound session to be established", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.outboundEstablishTime", "How long it takes for a new outbound session to be established", "udp", UDPTransport.RATES);
@ -74,8 +75,7 @@ public class EstablishmentManager {
public void startup() {
_alive = true;
I2PThread t = new I2PThread(new Establisher(), "UDP Establisher");
t.setDaemon(true);
I2PThread t = new I2PThread(new Establisher(), "UDP Establisher", true);
t.start();
}
public void shutdown() {
@ -87,21 +87,17 @@ public class EstablishmentManager {
* Grab the active establishing state
*/
InboundEstablishState getInboundState(RemoteHostId from) {
synchronized (_inboundStates) {
InboundEstablishState state = (InboundEstablishState)_inboundStates.get(from);
InboundEstablishState state = _inboundStates.get(from);
// if ( (state == null) && (_log.shouldLog(Log.DEBUG)) )
// _log.debug("No inbound states for " + from + ", with remaining: " + _inboundStates);
return state;
}
}
OutboundEstablishState getOutboundState(RemoteHostId from) {
synchronized (_outboundStates) {
OutboundEstablishState state = (OutboundEstablishState)_outboundStates.get(from);
OutboundEstablishState state = _outboundStates.get(from);
// if ( (state == null) && (_log.shouldLog(Log.DEBUG)) )
// _log.debug("No outbound states for " + from + ", with remaining: " + _outboundStates);
return state;
}
}
private int getMaxConcurrentEstablish() {
@ -163,39 +159,42 @@ public class EstablishmentManager {
int deferred = 0;
boolean rejected = false;
int queueCount = 0;
synchronized (_outboundStates) {
state = (OutboundEstablishState)_outboundStates.get(to);
state = _outboundStates.get(to);
if (state == null) {
if (_outboundStates.size() >= getMaxConcurrentEstablish()) {
List queued = (List)_queuedOutbound.get(to);
if (queued == null) {
queued = new ArrayList(1);
if (_queuedOutbound.size() > MAX_QUEUED_OUTBOUND) {
rejected = true;
} else {
_queuedOutbound.put(to, queued);
}
if (_queuedOutbound.size() > MAX_QUEUED_OUTBOUND) {
rejected = true;
} else {
List<OutNetMessage> newQueued = new ArrayList(1);
List<OutNetMessage> queued = _queuedOutbound.putIfAbsent(to, newQueued);
if (queued == null)
queued = newQueued;
queueCount = queued.size();
if (queueCount < MAX_QUEUED_PER_PEER)
queued.add(msg);
}
queueCount = queued.size();
if ( (queueCount < MAX_QUEUED_PER_PEER) && (!rejected) )
queued.add(msg);
deferred = _queuedOutbound.size();
} else {
state = new OutboundEstablishState(_context, remAddr, port,
msg.getTarget().getIdentity(),
new SessionKey(addr.getIntroKey()), addr);
_outboundStates.put(to, state);
SimpleScheduler.getInstance().addEvent(new Expire(to, state), 10*1000);
OutboundEstablishState oldState = _outboundStates.putIfAbsent(to, state);
boolean isNew = oldState == null;
if (!isNew)
// whoops, somebody beat us to it, throw out the state we just created
state = oldState;
else
SimpleScheduler.getInstance().addEvent(new Expire(to, state), 10*1000);
}
}
if (state != null) {
state.addMessage(msg);
List queued = (List)_queuedOutbound.remove(to);
List<OutNetMessage> queued = _queuedOutbound.remove(to);
if (queued != null)
for (int i = 0; i < queued.size(); i++)
state.addMessage((OutNetMessage)queued.get(i));
state.addMessage(queued.get(i));
}
}
if (rejected) {
_transport.failed(msg, "Too many pending outbound connections");
@ -223,17 +222,9 @@ public class EstablishmentManager {
_state = state;
}
public void timeReached() {
Object removed = null;
synchronized (_outboundStates) {
removed = _outboundStates.remove(_to);
if ( (removed != null) && (removed != _state) ) { // oops, we must have failed, then retried
_outboundStates.put(_to, removed);
removed = null;
}/* else {
locked_admitQueued();
}*/
}
if (removed != null) {
// remove only if value == state
boolean removed = _outboundStates.remove(_to, _state);
if (removed) {
_context.statManager().addRateData("udp.outboundEstablishFailedState", _state.getState(), _state.getLifetime());
if (_log.shouldLog(Log.WARN))
_log.warn("Timing out expired outbound: " + _state);
@ -260,12 +251,11 @@ public class EstablishmentManager {
int maxInbound = getMaxInboundEstablishers();
boolean isNew = false;
InboundEstablishState state = null;
synchronized (_inboundStates) {
if (_inboundStates.size() >= maxInbound)
return; // drop the packet
state = (InboundEstablishState)_inboundStates.get(from);
InboundEstablishState state = _inboundStates.get(from);
if (state == null) {
if (_context.blocklist().isBlocklisted(from.getIP())) {
if (_log.shouldLog(Log.WARN))
@ -276,10 +266,13 @@ public class EstablishmentManager {
return; // drop the packet
state = new InboundEstablishState(_context, from.getIP(), from.getPort(), _transport.getLocalPort());
state.receiveSessionRequest(reader.getSessionRequestReader());
isNew = true;
_inboundStates.put(from, state);
InboundEstablishState oldState = _inboundStates.putIfAbsent(from, state);
isNew = oldState == null;
if (!isNew)
// whoops, somebody beat us to it, throw out the state we just created
state = oldState;
}
}
if (isNew) {
// we don't expect inbound connections when hidden, but it could happen
// Don't offer if we are approaching max connections. While Relay Intros do not
@ -307,10 +300,7 @@ public class EstablishmentManager {
* establishment)
*/
void receiveSessionConfirmed(RemoteHostId from, UDPPacketReader reader) {
InboundEstablishState state = null;
synchronized (_inboundStates) {
state = (InboundEstablishState)_inboundStates.get(from);
}
InboundEstablishState state = _inboundStates.get(from);
if (state != null) {
state.receiveSessionConfirmed(reader.getSessionConfirmedReader());
notifyActivity();
@ -324,10 +314,7 @@ public class EstablishmentManager {
*
*/
void receiveSessionCreated(RemoteHostId from, UDPPacketReader reader) {
OutboundEstablishState state = null;
synchronized (_outboundStates) {
state = (OutboundEstablishState)_outboundStates.get(from);
}
OutboundEstablishState state = _outboundStates.get(from);
if (state != null) {
state.receiveSessionCreated(reader.getSessionCreatedReader());
notifyActivity();
@ -346,21 +333,19 @@ public class EstablishmentManager {
//int active = 0;
//int admitted = 0;
//int remaining = 0;
synchronized (_outboundStates) {
//active = _outboundStates.size();
_outboundStates.remove(state.getRemoteHostId());
if (_queuedOutbound.size() > 0) {
// there shouldn't have been queued messages for this active state, but just in case...
List queued = (List)_queuedOutbound.remove(state.getRemoteHostId());
List<OutNetMessage> queued = _queuedOutbound.remove(state.getRemoteHostId());
if (queued != null) {
for (int i = 0; i < queued.size(); i++)
state.addMessage((OutNetMessage)queued.get(i));
state.addMessage(queued.get(i));
}
//admitted = locked_admitQueued();
}
//remaining = _queuedOutbound.size();
}
//if (admitted > 0)
// _log.log(Log.CRIT, "Admitted " + admitted + " with " + remaining + " remaining queued and " + active + " active");
@ -371,6 +356,7 @@ public class EstablishmentManager {
return peer;
}
/********
private int locked_admitQueued() {
int admitted = 0;
while ( (_queuedOutbound.size() > 0) && (_outboundStates.size() < getMaxConcurrentEstablish()) ) {
@ -409,6 +395,7 @@ public class EstablishmentManager {
}
return admitted;
}
*******/
private void notifyActivity() {
synchronized (_activityLock) {
@ -596,9 +583,7 @@ public class EstablishmentManager {
} catch (DHSessionKeyBuilder.InvalidPublicParameterException ippe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Peer " + state.getRemoteHostId() + " sent us an invalid DH parameter (or were spoofed)", ippe);
synchronized (_inboundStates) {
_inboundStates.remove(state.getRemoteHostId());
}
_inboundStates.remove(state.getRemoteHostId());
return;
}
_transport.send(_builder.buildSessionCreatedPacket(state, _transport.getExternalPort(), _transport.getIntroKey()));
@ -627,14 +612,12 @@ public class EstablishmentManager {
private void handlePendingIntro(OutboundEstablishState state) {
long nonce = _context.random().nextLong(MAX_NONCE);
while (true) {
synchronized (_liveIntroductions) {
OutboundEstablishState old = (OutboundEstablishState)_liveIntroductions.put(new Long(nonce), state);
OutboundEstablishState old = _liveIntroductions.putIfAbsent(new Long(nonce), state);
if (old != null) {
nonce = _context.random().nextLong(MAX_NONCE);
} else {
break;
}
}
}
SimpleScheduler.getInstance().addEvent(new FailIntroduction(state, nonce), INTRO_ATTEMPT_TIMEOUT);
state.setIntroNonce(nonce);
@ -656,16 +639,9 @@ public class EstablishmentManager {
_state = state;
}
public void timeReached() {
OutboundEstablishState removed = null;
synchronized (_liveIntroductions) {
removed = (OutboundEstablishState)_liveIntroductions.remove(new Long(_nonce));
if (removed != _state) {
// another one with the same nonce in a very brief time...
_liveIntroductions.put(new Long(_nonce), removed);
removed = null;
}
}
if (removed != null) {
// remove only if value equal to state
boolean removed = _liveIntroductions.remove(new Long(_nonce), _state);
if (removed) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send intro for " + _state.getRemoteHostId().toString() + " timed out");
_context.statManager().addRateData("udp.sendIntroRelayTimeout", 1, 0);
@ -677,10 +653,7 @@ public class EstablishmentManager {
/* FIXME Exporting non-public type through public API FIXME */
public void receiveRelayResponse(RemoteHostId bob, UDPPacketReader reader) {
long nonce = reader.getRelayResponseReader().readNonce();
OutboundEstablishState state = null;
synchronized (_liveIntroductions) {
state = (OutboundEstablishState)_liveIntroductions.remove(new Long(nonce));
}
OutboundEstablishState state = _liveIntroductions.remove(new Long(nonce));
if (state == null)
return; // already established
@ -705,10 +678,8 @@ public class EstablishmentManager {
+ addr.toString() + ":" + port + " (according to " + bob.toString(true) + ")");
RemoteHostId oldId = state.getRemoteHostId();
state.introduced(addr, ip, port);
synchronized (_outboundStates) {
_outboundStates.remove(oldId);
_outboundStates.put(state.getRemoteHostId(), state);
}
_outboundStates.remove(oldId);
_outboundStates.put(state.getRemoteHostId(), state);
notifyActivity();
}
@ -748,11 +719,11 @@ public class EstablishmentManager {
long now = _context.clock().now();
long nextSendTime = -1;
InboundEstablishState inboundState = null;
synchronized (_inboundStates) {
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("# inbound states: " + _inboundStates.size());
for (Iterator iter = _inboundStates.values().iterator(); iter.hasNext(); ) {
InboundEstablishState cur = (InboundEstablishState)iter.next();
for (Iterator<InboundEstablishState> iter = _inboundStates.values().iterator(); iter.hasNext(); ) {
InboundEstablishState cur = iter.next();
if (cur.getState() == InboundEstablishState.STATE_CONFIRMED_COMPLETELY) {
// completely received (though the signature may be invalid)
iter.remove();
@ -791,7 +762,6 @@ public class EstablishmentManager {
}
}
}
}
if (inboundState != null) {
//if (_log.shouldLog(Log.DEBUG))
@ -853,12 +823,12 @@ public class EstablishmentManager {
//int admitted = 0;
//int remaining = 0;
//int active = 0;
synchronized (_outboundStates) {
//active = _outboundStates.size();
//if (_log.shouldLog(Log.DEBUG))
// _log.debug("# outbound states: " + _outboundStates.size());
for (Iterator iter = _outboundStates.values().iterator(); iter.hasNext(); ) {
OutboundEstablishState cur = (OutboundEstablishState)iter.next();
for (Iterator<OutboundEstablishState> iter = _outboundStates.values().iterator(); iter.hasNext(); ) {
OutboundEstablishState cur = iter.next();
if (cur == null) continue;
if (cur.getState() == OutboundEstablishState.STATE_CONFIRMED_COMPLETELY) {
// completely received
@ -902,7 +872,6 @@ public class EstablishmentManager {
//admitted = locked_admitQueued();
//remaining = _queuedOutbound.size();
}
//if (admitted > 0)
// _log.log(Log.CRIT, "Admitted " + admitted + " in push with " + remaining + " remaining queued and " + active + " active");
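The rewrite above leans on two ConcurrentHashMap idioms in place of the old synchronized blocks: putIfAbsent() to detect that another thread created the state first, and the two-argument remove() to expire an entry only if it still maps to the same object. A minimal sketch with hypothetical names:

import java.util.concurrent.ConcurrentHashMap;

// Hypothetical sketch only; keys and values are placeholders.
class ConcurrentStateSketch {
    private final ConcurrentHashMap<String, Object> states =
        new ConcurrentHashMap<String, Object>();

    Object getOrCreate(String key) {
        Object created = new Object();
        Object old = states.putIfAbsent(key, created);
        // non-null means somebody beat us to it; throw out the state we just created
        return (old != null) ? old : created;
    }

    void expire(String key, Object expected) {
        // remove only if the map still holds the exact state this expiry was scheduled for,
        // leaving any newer state under the same key untouched
        if (states.remove(key, expected)) {
            // record the timeout, update stats, etc.
        }
    }
}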

View File

@ -21,17 +21,17 @@ import net.i2p.util.Log;
*
*/
public class InboundEstablishState {
private RouterContext _context;
private Log _log;
private final RouterContext _context;
private final Log _log;
// SessionRequest message
private byte _receivedX[];
private byte _bobIP[];
private int _bobPort;
private final int _bobPort;
private DHSessionKeyBuilder _keyBuilder;
// SessionCreated message
private byte _sentY[];
private byte _aliceIP[];
private int _alicePort;
private final byte _aliceIP[];
private final int _alicePort;
private long _sentRelayTag;
private long _sentSignedOnTime;
private SessionKey _sessionKey;
@ -44,11 +44,11 @@ public class InboundEstablishState {
private boolean _verificationAttempted;
private RouterIdentity _receivedConfirmedIdentity;
// general status
private long _establishBegin;
private long _lastReceive;
private final long _establishBegin;
//private long _lastReceive;
// private long _lastSend;
private long _nextSend;
private RemoteHostId _remoteHostId;
private final RemoteHostId _remoteHostId;
private int _currentState;
private boolean _complete;
@ -121,9 +121,10 @@ public class InboundEstablishState {
public synchronized SessionKey getMACKey() { return _macKey; }
/** what IP do they appear to be on? */
public synchronized byte[] getSentIP() { return _aliceIP; }
public byte[] getSentIP() { return _aliceIP; }
/** what port number do they appear to be coming from? */
public synchronized int getSentPort() { return _alicePort; }
public int getSentPort() { return _alicePort; }
public synchronized byte[] getBobIP() { return _bobIP; }
@ -205,8 +206,8 @@ public class InboundEstablishState {
}
/** how long have we been trying to establish this session? */
public synchronized long getLifetime() { return _context.clock().now() - _establishBegin; }
public synchronized long getEstablishBeginTime() { return _establishBegin; }
public long getLifetime() { return _context.clock().now() - _establishBegin; }
public long getEstablishBeginTime() { return _establishBegin; }
public synchronized long getNextSendTime() { return _nextSend; }
public synchronized void setNextSendTime(long when) { _nextSend = when; }
@ -328,8 +329,7 @@ public class InboundEstablishState {
}
private void packetReceived() {
_lastReceive = _context.clock().now();
_nextSend = _lastReceive;
_nextSend = _context.clock().now();
}
@Override

View File

@ -96,8 +96,8 @@ public class InboundMessageFragments /*implements UDPTransport.PartialACKSource
if (fragments <= 0) return fragments;
Hash fromPeer = from.getRemotePeer();
Map messages = from.getInboundMessages();
Map<Long, InboundMessageState> messages = from.getInboundMessages();
for (int i = 0; i < fragments; i++) {
long mid = data.readMessageId(i);
Long messageId = new Long(mid);
@ -122,7 +122,7 @@ public class InboundMessageFragments /*implements UDPTransport.PartialACKSource
boolean partialACK = false;
synchronized (messages) {
state = (InboundMessageState)messages.get(messageId);
state = messages.get(messageId);
if (state == null) {
state = new InboundMessageState(_context, mid, fromPeer);
messages.put(messageId, state);

View File

@ -172,8 +172,8 @@ public class InboundMessageState {
@Override
public String toString() {
StringBuilder buf = new StringBuilder(32);
buf.append("Message: ").append(_messageId);
StringBuilder buf = new StringBuilder(256);
buf.append("IB Message: ").append(_messageId);
if (isComplete()) {
buf.append(" completely received with ");
buf.append(getCompleteSize()).append(" bytes");

View File

@ -6,12 +6,15 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.Base64;
import net.i2p.data.RouterAddress;
import net.i2p.data.RouterInfo;
import net.i2p.data.SessionKey;
import net.i2p.router.RouterContext;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;
/**
@ -23,17 +26,17 @@ public class IntroductionManager {
private UDPTransport _transport;
private PacketBuilder _builder;
/** map of relay tag to PeerState that should receive the introduction */
private Map<Long, PeerState> _outbound;
private final Map<Long, PeerState> _outbound;
/** list of peers (PeerState) who have given us introduction tags */
private final List<PeerState> _inbound;
private final Set<PeerState> _inbound;
public IntroductionManager(RouterContext ctx, UDPTransport transport) {
_context = ctx;
_log = ctx.logManager().getLog(IntroductionManager.class);
_transport = transport;
_builder = new PacketBuilder(ctx, transport);
_outbound = Collections.synchronizedMap(new HashMap(128));
_inbound = new ArrayList(128);
_outbound = new ConcurrentHashMap(128);
_inbound = new ConcurrentHashSet(128);
ctx.statManager().createRateStat("udp.receiveRelayIntro", "How often we get a relayed request for us to talk to someone?", "udp", UDPTransport.RATES);
ctx.statManager().createRateStat("udp.receiveRelayRequest", "How often we receive a good request to relay to someone else?", "udp", UDPTransport.RATES);
ctx.statManager().createRateStat("udp.receiveRelayRequestBadTag", "Received relay requests with bad/expired tag", "udp", UDPTransport.RATES);
@ -52,10 +55,7 @@ public class IntroductionManager {
if (peer.getWeRelayToThemAs() > 0)
_outbound.put(new Long(peer.getWeRelayToThemAs()), peer);
if (peer.getTheyRelayToUsAs() > 0) {
synchronized (_inbound) {
if (!_inbound.contains(peer))
_inbound.add(peer);
}
}
}
@ -67,9 +67,7 @@ public class IntroductionManager {
if (peer.getWeRelayToThemAs() > 0)
_outbound.remove(new Long(peer.getWeRelayToThemAs()));
if (peer.getTheyRelayToUsAs() > 0) {
synchronized (_inbound) {
_inbound.remove(peer);
}
_inbound.remove(peer);
}
}
@ -90,14 +88,11 @@ public class IntroductionManager {
* and we want to keep our introducers valid.
*/
public int pickInbound(Properties ssuOptions, int howMany) {
List<PeerState> peers = null;
int start = _context.random().nextInt(Integer.MAX_VALUE);
synchronized (_inbound) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Picking inbound out of " + _inbound.size());
if (_inbound.size() <= 0) return 0;
peers = new ArrayList(_inbound);
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Picking inbound out of " + _inbound.size());
if (_inbound.isEmpty()) return 0;
List<PeerState> peers = new ArrayList(_inbound);
int sz = peers.size();
start = start % sz;
int found = 0;
@ -164,9 +159,7 @@ public class IntroductionManager {
* @return number of peers that have volunteered to introduce us
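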
*/
int introducerCount() {
synchronized(_inbound) {
return _inbound.size();
}
}
void receiveRelayIntro(RemoteHostId bob, UDPPacketReader reader) {

View File

@ -1,7 +1,7 @@
package net.i2p.router.transport.udp;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.data.Base64;
import net.i2p.data.ByteArray;
@ -24,15 +24,17 @@ public class MessageReceiver {
private Log _log;
private UDPTransport _transport;
/** list of messages (InboundMessageState) fully received but not interpreted yet */
private final List _completeMessages;
private final BlockingQueue<InboundMessageState> _completeMessages;
private boolean _alive;
private ByteCache _cache;
private static final int THREADS = 5;
private static final long POISON_IMS = -99999999999l;
public MessageReceiver(RouterContext ctx, UDPTransport transport) {
_context = ctx;
_log = ctx.logManager().getLog(MessageReceiver.class);
_transport = transport;
_completeMessages = new ArrayList(16);
_completeMessages = new LinkedBlockingQueue();
_cache = ByteCache.getInstance(64, I2NPMessage.MAX_SIZE);
_context.statManager().createRateStat("udp.inboundExpired", "How many messages were expired before reception?", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.inboundRemaining", "How many messages were remaining when a message is pulled off the complete queue?", "udp", UDPTransport.RATES);
@ -46,9 +48,8 @@ public class MessageReceiver {
public void startup() {
_alive = true;
for (int i = 0; i < 5; i++) {
I2PThread t = new I2PThread(new Runner(), "UDP message receiver " + i);
t.setDaemon(true);
for (int i = 0; i < THREADS; i++) {
I2PThread t = new I2PThread(new Runner(), "UDP message receiver " + i + '/' + THREADS, true);
t.start();
}
}
@ -61,26 +62,31 @@ public class MessageReceiver {
public void shutdown() {
_alive = false;
synchronized (_completeMessages) {
_completeMessages.clear();
_completeMessages.notifyAll();
_completeMessages.clear();
for (int i = 0; i < THREADS; i++) {
InboundMessageState ims = new InboundMessageState(_context, POISON_IMS, null);
_completeMessages.offer(ims);
}
for (int i = 1; i <= 5 && !_completeMessages.isEmpty(); i++) {
try {
Thread.sleep(i * 50);
} catch (InterruptedException ie) {}
}
_completeMessages.clear();
}
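The shutdown above is a poison-pill scheme: one sentinel is queued per receiver thread so every blocked take() wakes up and exits. A minimal sketch of the idea with hypothetical names:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Hypothetical sketch only; the real code uses a sentinel InboundMessageState.
class PoisonPillSketch {
    private static final String POISON = "__POISON__";
    private static final int WORKERS = 5;
    private final BlockingQueue<String> queue = new LinkedBlockingQueue<String>();

    void shutdown() {
        for (int i = 0; i < WORKERS; i++)
            queue.offer(POISON);              // each worker swallows exactly one sentinel
    }

    Runnable worker() {
        return new Runnable() {
            public void run() {
                try {
                    while (true) {
                        String item = queue.take();
                        if (item == POISON)   // identity check is enough for the sentinel
                            return;
                        process(item);
                    }
                } catch (InterruptedException ie) {}
            }
        };
    }

    void process(String item) { /* handle one queued item */ }
}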
public void receiveMessage(InboundMessageState state) {
int total = 0;
long lag = -1;
synchronized (_completeMessages) {
_completeMessages.add(state);
total = _completeMessages.size();
if (total > 1)
lag = ((InboundMessageState)_completeMessages.get(0)).getLifetime();
_completeMessages.notifyAll();
}
if (total > 1)
_context.statManager().addRateData("udp.inboundReady", total, 0);
if (lag > 1000)
_context.statManager().addRateData("udp.inboundLag", lag, total);
//int total = 0;
//long lag = -1;
if (_alive)
_completeMessages.offer(state);
//total = _completeMessages.size();
//if (total > 1)
// lag = ((InboundMessageState)_completeMessages.get(0)).getLifetime();
//if (total > 1)
// _context.statManager().addRateData("udp.inboundReady", total, 0);
//if (lag > 1000)
// _context.statManager().addRateData("udp.inboundLag", lag, total);
}
public void loop(I2NPMessageHandler handler) {
@ -91,19 +97,18 @@ public class MessageReceiver {
long expiredLifetime = 0;
int remaining = 0;
try {
synchronized (_completeMessages) {
while (message == null) {
if (_completeMessages.size() > 0) // grab the tail for lowest latency
message = (InboundMessageState)_completeMessages.remove(_completeMessages.size()-1);
else
_completeMessages.wait(5000);
message = _completeMessages.take();
if ( (message != null) && (message.getMessageId() == POISON_IMS) ) {
message = null;
break;
}
if ( (message != null) && (message.isExpired()) ) {
expiredLifetime += message.getLifetime();
message = null;
expired++;
}
remaining = _completeMessages.size();
}
//remaining = _completeMessages.size();
}
} catch (InterruptedException ie) {}

View File

@ -1,8 +1,8 @@
package net.i2p.router.transport.udp;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.crypto.DHSessionKeyBuilder;
import net.i2p.data.Base64;
@ -22,8 +22,8 @@ import net.i2p.util.Log;
*
*/
public class OutboundEstablishState {
private RouterContext _context;
private Log _log;
private final RouterContext _context;
private final Log _log;
// SessionRequest message
private byte _sentX[];
private byte _bobIP[];
@ -44,18 +44,18 @@ public class OutboundEstablishState {
private long _sentSignedOnTime;
private Signature _sentSignature;
// general status
private long _establishBegin;
private long _lastReceive;
private final long _establishBegin;
//private long _lastReceive;
private long _lastSend;
private long _nextSend;
private RemoteHostId _remoteHostId;
private RouterIdentity _remotePeer;
private final RouterIdentity _remotePeer;
private SessionKey _introKey;
private final List _queuedMessages;
private final Queue<OutNetMessage> _queuedMessages;
private int _currentState;
private long _introductionNonce;
// intro
private UDPAddress _remoteAddress;
private final UDPAddress _remoteAddress;
private boolean _complete;
/** nothing sent yet */
@ -87,7 +87,7 @@ public class OutboundEstablishState {
_remotePeer = remotePeer;
_introKey = introKey;
_keyBuilder = null;
_queuedMessages = new ArrayList(4);
_queuedMessages = new LinkedBlockingQueue();
_currentState = STATE_UNKNOWN;
_establishBegin = ctx.clock().now();
_remoteAddress = addr;
@ -113,22 +113,21 @@ public class OutboundEstablishState {
public long getIntroNonce() { return _introductionNonce; }
public void addMessage(OutNetMessage msg) {
synchronized (_queuedMessages) {
if (!_queuedMessages.contains(msg))
_queuedMessages.add(msg);
}
// chance of a duplicate here in a race, that's ok
if (!_queuedMessages.contains(msg))
_queuedMessages.offer(msg);
else if (_log.shouldLog(Log.WARN))
_log.warn("attempt to add duplicate msg to queue: " + msg);
}
public OutNetMessage getNextQueuedMessage() {
synchronized (_queuedMessages) {
if (_queuedMessages.size() > 0)
return (OutNetMessage)_queuedMessages.remove(0);
}
return null;
return _queuedMessages.poll();
}
public RouterIdentity getRemoteIdentity() { return _remotePeer; }
public SessionKey getIntroKey() { return _introKey; }
/** called from constructor, no need to synch */
private void prepareSessionRequest() {
_keyBuilder = new DHSessionKeyBuilder();
byte X[] = _keyBuilder.getMyPublicValue().toByteArray();
@ -142,7 +141,7 @@ public class OutboundEstablishState {
System.arraycopy(X, 0, _sentX, _sentX.length - X.length, X.length);
}
public synchronized byte[] getSentX() { return _sentX; }
public byte[] getSentX() { return _sentX; }
public synchronized byte[] getSentIP() { return _bobIP; }
public synchronized int getSentPort() { return _bobPort; }
@ -403,8 +402,8 @@ public class OutboundEstablishState {
}
/** how long have we been trying to establish this session? */
public synchronized long getLifetime() { return _context.clock().now() - _establishBegin; }
public synchronized long getEstablishBeginTime() { return _establishBegin; }
public long getLifetime() { return _context.clock().now() - _establishBegin; }
public long getEstablishBeginTime() { return _establishBegin; }
public synchronized long getNextSendTime() { return _nextSend; }
public synchronized void setNextSendTime(long when) {
_nextSend = when;
@ -422,8 +421,7 @@ public class OutboundEstablishState {
}
private void packetReceived() {
_lastReceive = _context.clock().now();
_nextSend = _lastReceive;
_nextSend = _context.clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Got a packet, nextSend == now");
}

View File

@ -28,7 +28,7 @@ public class OutboundMessageFragments {
private UDPTransport _transport;
// private ActiveThrottle _throttle; // LINT not used ??
/** peers we are actively sending messages to */
private final List _activePeers;
private final List<PeerState> _activePeers;
private boolean _alive;
/** which peer should we build the next packet out of? */
private int _nextPeer;
@ -207,7 +207,7 @@ public class OutboundMessageFragments {
synchronized (_activePeers) {
peers = new ArrayList(_activePeers.size());
for (int i = 0; i < _activePeers.size(); i++) {
PeerState state = (PeerState)_activePeers.get(i);
PeerState state = _activePeers.get(i);
if (state.getOutboundMessageCount() <= 0) {
_activePeers.remove(i);
i--;
@ -255,7 +255,7 @@ public class OutboundMessageFragments {
if (cycleTime > 1000)
_context.statManager().addRateData("udp.sendCycleTimeSlow", cycleTime, _activePeers.size());
}
peer = (PeerState)_activePeers.get(i);
peer = _activePeers.get(i);
state = peer.allocateSend();
if (state != null) {
_nextPeer = i + 1;
@ -318,12 +318,12 @@ public class OutboundMessageFragments {
return null;
// ok, simplest possible thing is to always tack on the bitfields if
List msgIds = peer.getCurrentFullACKs();
List<Long> msgIds = peer.getCurrentFullACKs();
if (msgIds == null) msgIds = new ArrayList();
List partialACKBitfields = new ArrayList();
List<ACKBitfield> partialACKBitfields = new ArrayList();
peer.fetchPartialACKs(partialACKBitfields);
int piggybackedPartialACK = partialACKBitfields.size();
List remaining = new ArrayList(msgIds);
List<Long> remaining = new ArrayList(msgIds);
int sparseCount = 0;
UDPPacket rv[] = new UDPPacket[fragments]; //sparse
for (int i = 0; i < fragments; i++) {
@ -356,7 +356,7 @@ public class OutboundMessageFragments {
int piggybackedAck = 0;
if (msgIds.size() != remaining.size()) {
for (int i = 0; i < msgIds.size(); i++) {
Long id = (Long)msgIds.get(i);
Long id = msgIds.get(i);
if (!remaining.contains(id)) {
peer.removeACKMessage(id);
piggybackedAck++;

View File

@ -342,8 +342,8 @@ public class OutboundMessageState {
public String toString() {
short sends[] = _fragmentSends;
ByteArray messageBuf = _messageBuf;
StringBuilder buf = new StringBuilder(64);
buf.append("Message ").append(_messageId);
StringBuilder buf = new StringBuilder(256);
buf.append("OB Message ").append(_messageId);
if (sends != null)
buf.append(" with ").append(sends.length).append(" fragments");
if (messageBuf != null)

View File

@ -31,8 +31,7 @@ public class OutboundRefiller implements Runnable {
public void startup() {
_alive = true;
I2PThread t = new I2PThread(this, "UDP outbound refiller");
t.setDaemon(true);
I2PThread t = new I2PThread(this, "UDP outbound refiller", true);
t.start();
}
public void shutdown() { _alive = false; }

View File

@ -2,8 +2,8 @@ package net.i2p.router.transport.udp;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
@ -32,6 +32,25 @@ public class PacketBuilder {
private static final ByteCache _hmacCache = ByteCache.getInstance(64, Hash.HASH_LENGTH);
private static final ByteCache _blockCache = ByteCache.getInstance(64, 16);
/**
* For debugging and stats only - does not go out on the wire.
* These are chosen to be higher than the highest I2NP message type,
* as a data packet is set to the underlying I2NP message type.
*/
static final int TYPE_FIRST = 42;
static final int TYPE_ACK = TYPE_FIRST;
static final int TYPE_PUNCH = 43;
static final int TYPE_RESP = 44;
static final int TYPE_INTRO = 45;
static final int TYPE_RREQ = 46;
static final int TYPE_TCB = 47;
static final int TYPE_TBC = 48;
static final int TYPE_TTA = 49;
static final int TYPE_TFA = 50;
static final int TYPE_CONF = 51;
static final int TYPE_SREQ = 52;
static final int TYPE_CREAT = 53;
/** we only talk to people of the right version */
static final int PROTOCOL_VERSION = 0;
@ -58,7 +77,7 @@ public class PacketBuilder {
* The list itself is passed by reference, and if a messageId is
* included, it should be removed from the list.
*/
public UDPPacket buildPacket(OutboundMessageState state, int fragment, PeerState peer, List ackIdsRemaining, List partialACKsRemaining) {
public UDPPacket buildPacket(OutboundMessageState state, int fragment, PeerState peer, List<Long> ackIdsRemaining, List<ACKBitfield> partialACKsRemaining) {
UDPPacket packet = UDPPacket.acquire(_context, false);
StringBuilder msg = null;
@ -92,18 +111,18 @@ public class PacketBuilder {
// is under the MTU, but for now, since the # of packets acked is so few (usually
// just one or two), and since the packets are so small anyway, an additional five
// or ten bytes doesn't hurt.
if ( (ackIdsRemaining != null) && (ackIdsRemaining.size() > 0) )
if ( (ackIdsRemaining != null) && (!ackIdsRemaining.isEmpty()) )
data[off] |= UDPPacket.DATA_FLAG_EXPLICIT_ACK;
if ( (partialACKsRemaining != null) && (partialACKsRemaining.size() > 0) )
if ( (partialACKsRemaining != null) && (!partialACKsRemaining.isEmpty()) )
data[off] |= UDPPacket.DATA_FLAG_ACK_BITFIELDS;
off++;
if ( (ackIdsRemaining != null) && (ackIdsRemaining.size() > 0) ) {
if ( (ackIdsRemaining != null) && (!ackIdsRemaining.isEmpty()) ) {
DataHelper.toLong(data, off, 1, ackIdsRemaining.size());
off++;
for (int i = 0; i < ackIdsRemaining.size(); i++) {
//while (ackIdsRemaining.size() > 0) {
Long ackId = (Long)ackIdsRemaining.get(i);//(Long)ackIdsRemaining.remove(0);
Long ackId = ackIdsRemaining.get(i);//(Long)ackIdsRemaining.remove(0);
DataHelper.toLong(data, off, 4, ackId.longValue());
off += 4;
if (msg != null) // logging it
@ -118,7 +137,7 @@ public class PacketBuilder {
// leave it blank for now, since we could skip some
off++;
for (int i = 0; i < partialACKsRemaining.size(); i++) {
ACKBitfield bitfield = (ACKBitfield)partialACKsRemaining.get(i);
ACKBitfield bitfield = partialACKsRemaining.get(i);
if (bitfield.receivedComplete()) continue;
DataHelper.toLong(data, off, 4, bitfield.getMessageId());
off += 4;
@ -214,15 +233,18 @@ public class PacketBuilder {
// We use this for keepalive purposes.
// It doesn't generate a reply, but that's ok.
public UDPPacket buildPing(PeerState peer) {
return buildACK(peer, new ArrayList(0));
return buildACK(peer, Collections.EMPTY_LIST);
}
private static final int ACK_PRIORITY = 1;
/**
* Build the ack packet. The list need not be sorted into full and partial;
* this method will put all fulls before the partials in the outgoing packet.
*
* @param ackBitfields list of ACKBitfield instances to either fully or partially ACK
*/
public UDPPacket buildACK(PeerState peer, List ackBitfields) {
public UDPPacket buildACK(PeerState peer, List<ACKBitfield> ackBitfields) {
UDPPacket packet = UDPPacket.acquire(_context, false);
StringBuilder msg = null;
@ -263,7 +285,7 @@ public class PacketBuilder {
DataHelper.toLong(data, off, 1, fullACKCount);
off++;
for (int i = 0; i < ackBitfields.size(); i++) {
ACKBitfield bf = (ACKBitfield)ackBitfields.get(i);
ACKBitfield bf = ackBitfields.get(i);
if (bf.receivedComplete()) {
DataHelper.toLong(data, off, 4, bf.getMessageId());
off += 4;
@ -415,7 +437,7 @@ public class PacketBuilder {
authenticate(packet, ourIntroKey, ourIntroKey, iv);
setTo(packet, to, state.getSentPort());
_ivCache.release(iv);
packet.setMessageType(53);
packet.setMessageType(TYPE_CREAT);
return packet;
}
@ -479,7 +501,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, state.getIntroKey(), state.getIntroKey());
setTo(packet, to, state.getSentPort());
packet.setMessageType(52);
packet.setMessageType(TYPE_SREQ);
return packet;
}
@ -586,7 +608,7 @@ public class PacketBuilder {
}
setTo(packet, to, state.getSentPort());
packet.setMessageType(51);
packet.setMessageType(TYPE_CONF);
return packet;
}
@ -639,7 +661,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, toCipherKey, toMACKey);
setTo(packet, toIP, toPort);
packet.setMessageType(50);
packet.setMessageType(TYPE_TFA);
return packet;
}
@ -684,7 +706,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, aliceIntroKey, aliceIntroKey);
setTo(packet, aliceIP, alicePort);
packet.setMessageType(49);
packet.setMessageType(TYPE_TTA);
return packet;
}
@ -731,7 +753,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, charlieCipherKey, charlieMACKey);
setTo(packet, charlieIP, charliePort);
packet.setMessageType(48);
packet.setMessageType(TYPE_TBC);
return packet;
}
@ -776,7 +798,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, bobCipherKey, bobMACKey);
setTo(packet, bobIP, bobPort);
packet.setMessageType(47);
packet.setMessageType(TYPE_TCB);
return packet;
}
@ -875,7 +897,7 @@ public class PacketBuilder {
if (encrypt)
authenticate(packet, new SessionKey(introKey), new SessionKey(introKey));
setTo(packet, introHost, introPort);
packet.setMessageType(46);
packet.setMessageType(TYPE_RREQ);
return packet;
}
@ -925,7 +947,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, charlie.getCurrentCipherKey(), charlie.getCurrentMACKey());
setTo(packet, charlie.getRemoteIPAddress(), charlie.getRemotePort());
packet.setMessageType(45);
packet.setMessageType(TYPE_INTRO);
return packet;
}
@ -986,7 +1008,7 @@ public class PacketBuilder {
packet.getPacket().setLength(off);
authenticate(packet, aliceIntroKey, aliceIntroKey);
setTo(packet, aliceAddr, alice.getPort());
packet.setMessageType(44);
packet.setMessageType(TYPE_RESP);
return packet;
}
@ -1019,7 +1041,7 @@ public class PacketBuilder {
packet.getPacket().setLength(0);
setTo(packet, to, port);
packet.setMessageType(43);
packet.setMessageType(TYPE_PUNCH);
return packet;
}
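buildACK() above documents that full ACKs are written before partial bitfields regardless of how the caller ordered the list. A minimal sketch of that partitioning, with a hypothetical Bitfield stand-in for ACKBitfield:

import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch only; the real method serializes directly into the packet buffer.
class AckOrderSketch {
    interface Bitfield { boolean receivedComplete(); }

    static List<Bitfield> fullsThenPartials(List<Bitfield> mixed) {
        List<Bitfield> ordered = new ArrayList<Bitfield>(mixed.size());
        for (Bitfield bf : mixed)
            if (bf.receivedComplete())
                ordered.add(bf);      // full ACKs first
        for (Bitfield bf : mixed)
            if (!bf.receivedComplete())
                ordered.add(bf);      // then partial ACK bitfields
        return ordered;
    }
}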

View File

@ -1,8 +1,6 @@
package net.i2p.router.transport.udp;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
@ -31,7 +29,7 @@ public class PacketHandler {
private PeerTestManager _testManager;
private IntroductionManager _introManager;
private boolean _keepReading;
private List _handlers;
private final Handler[] _handlers;
private static final int NUM_HANDLERS = 5;
/** let packets be up to 30s slow */
@ -46,9 +44,9 @@ public class PacketHandler {
_inbound = inbound;
_testManager = testManager;
_introManager = introManager;
_handlers = new ArrayList(NUM_HANDLERS);
_handlers = new Handler[NUM_HANDLERS];
for (int i = 0; i < NUM_HANDLERS; i++) {
_handlers.add(new Handler());
_handlers[i] = new Handler();
}
_context.statManager().createRateStat("udp.handleTime", "How long it takes to handle a received packet after its been pulled off the queue", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.queueTime", "How long after a packet is received can we begin handling it", "udp", UDPTransport.RATES);
@ -81,9 +79,8 @@ public class PacketHandler {
public void startup() {
_keepReading = true;
for (int i = 0; i < _handlers.size(); i++) {
I2PThread t = new I2PThread((Handler)_handlers.get(i), "UDP Packet handler " + i + "/" + _handlers.size());
t.setDaemon(true);
for (int i = 0; i < NUM_HANDLERS; i++) {
I2PThread t = new I2PThread(_handlers[i], "UDP Packet handler " + i + '/' + NUM_HANDLERS, true);
t.start();
}
}
@ -94,10 +91,9 @@ public class PacketHandler {
String getHandlerStatus() {
StringBuilder rv = new StringBuilder();
int size = _handlers.size();
rv.append("Handlers: ").append(size);
for (int i = 0; i < size; i++) {
Handler handler = (Handler)_handlers.get(i);
rv.append("Handlers: ").append(NUM_HANDLERS);
for (int i = 0; i < NUM_HANDLERS; i++) {
Handler handler = _handlers[i];
rv.append(" handler ").append(i).append(" state: ").append(handler._state);
}
return rv.toString();

View File

@ -25,8 +25,7 @@ public class PacketPusher implements Runnable {
public void startup() {
_alive = true;
I2PThread t = new I2PThread(this, "UDP packet pusher");
t.setDaemon(true);
I2PThread t = new I2PThread(this, "UDP packet pusher", true);
t.start();
}
@ -39,7 +38,8 @@ public class PacketPusher implements Runnable {
if (packets != null) {
for (int i = 0; i < packets.length; i++) {
if (packets[i] != null) // null for ACKed fragments
_sender.add(packets[i], 0); // 0 does not block //100); // blocks for up to 100ms
//_sender.add(packets[i], 0); // 0 does not block //100); // blocks for up to 100ms
_sender.add(packets[i]);
}
}
} catch (Exception e) {

View File

@ -8,12 +8,16 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.data.Hash;
import net.i2p.data.SessionKey;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
import net.i2p.util.ConcurrentHashSet;
/**
* Contain all of the state about a UDP connection to a peer.
@ -73,14 +77,22 @@ public class PeerState {
private int _consecutiveFailedSends;
/** when did we last have a failed send (beginning of period) */
// private long _lastFailedSendPeriod;
/** list of messageIds (Long) that we have received but not yet sent */
private final List _currentACKs;
/**
* Set of messageIds (Long) that we have received but not yet sent
* Since even with the smallest MTU we can fit 131 acks in a message,
* we are unlikely to get backed up on acks, so we don't keep
* them in any particular order.
*/
private final Set<Long> _currentACKs;
/**
* list of the most recent messageIds (Long) that we have received and sent
* an ACK for. We keep a few of these around to retransmit with _currentACKs,
* hopefully saving some spurious retransmissions
*/
private final List _currentACKsResend;
private final Queue<Long> _currentACKsResend;
/** when did we last send ACKs to the peer? */
private volatile long _lastACKSend;
/** when did we decide we need to ACK to this peer? */
@ -169,9 +181,9 @@ public class PeerState {
private long _packetsReceived;
/** list of InboundMessageState for active message */
private final Map _inboundMessages;
private final Map<Long, InboundMessageState> _inboundMessages;
/** list of OutboundMessageState */
private final List _outboundMessages;
private final List<OutboundMessageState> _outboundMessages;
/** which outbound message is currently being retransmitted */
private OutboundMessageState _retransmitter;
@ -180,8 +192,10 @@ public class PeerState {
/** have we migrated away from this peer to another newer one? */
private volatile boolean _dead;
/** Make sure a 4229 byte TunnelBuildMessage can be sent in one volley with small MTU */
private static final int MIN_CONCURRENT_MSGS = 8;
/** how many concurrent outbound messages do we allow OutboundMessageFragments to send */
private volatile int _concurrentMessagesAllowed = 8;
private volatile int _concurrentMessagesAllowed = MIN_CONCURRENT_MSGS;
/**
* how many outbound messages are currently being transmitted. Not thread safe, as we're not strict
*/
@ -203,6 +217,11 @@ public class PeerState {
* we need 522 fragment bytes to fit it in 2 packets - add 46 for SSU, 20
* for UDP, and 8 for IP, giving us 596. round up to mod 16, giving a total
* of 608
*
* Well, we really need to count the acks as well, especially
* 4 * MAX_RESEND_ACKS which can take up a significant amount of space.
* We reduce the max acks when using the small MTU but it may not be enough...
*
*/
private static final int MIN_MTU = 608;//600; //1500;
private static final int DEFAULT_MTU = MIN_MTU;
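A quick sketch of the arithmetic in the comment above that yields MIN_MTU = 608 (illustration only):

// Illustration only: 522 fragment bytes + 46 (SSU) + 20 + 8 (UDP/IP as listed above) = 596,
// rounded up to the next multiple of 16.
class MinMtuSketch {
    public static void main(String[] args) {
        int raw = 522 + 46 + 20 + 8;         // 596
        int minMtu = ((raw + 15) / 16) * 16; // round up to a multiple of 16
        System.out.println(minMtu);          // prints 608
    }
}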
@ -234,8 +253,8 @@ public class PeerState {
_currentReceiveSecond = -1;
_lastSendTime = -1;
_lastReceiveTime = -1;
_currentACKs = new ArrayList(8);
_currentACKsResend = new ArrayList(8);
_currentACKs = new ConcurrentHashSet();
_currentACKsResend = new LinkedBlockingQueue();
_currentSecondECNReceived = false;
_remoteWantsPreviousACKs = false;
_sendWindowBytes = DEFAULT_SEND_WINDOW_BYTES;
@ -582,12 +601,9 @@ public class PeerState {
_context.statManager().addRateData("udp.receiveBps", _receiveBps, 0);
}
synchronized (_currentACKs) {
if (_wantACKSendSince <= 0)
_wantACKSendSince = now;
if (!_currentACKs.contains(messageId))
_currentACKs.add(messageId);
}
if (_wantACKSendSince <= 0)
_wantACKSendSince = now;
_currentACKs.add(messageId);
_messagesReceived++;
}
@ -600,7 +616,8 @@ public class PeerState {
* Fetch the internal id (Long) to InboundMessageState for incomplete inbound messages.
* Access to this map must be synchronized explicitly!
*/
public Map getInboundMessages() { return _inboundMessages; }
public Map<Long, InboundMessageState> getInboundMessages() { return _inboundMessages; }
/**
* Expire partially received inbound messages, returning how many are still pending.
* This should probably be fired periodically, in case a peer goes silent and we don't
@ -661,26 +678,36 @@ public class PeerState {
* removeACKMessage(Long) should be called.
*
*/
public List getCurrentFullACKs() {
synchronized (_currentACKs) {
ArrayList rv = new ArrayList(_currentACKs);
public List<Long> getCurrentFullACKs() {
ArrayList<Long> rv = new ArrayList(_currentACKs);
// include some for retransmission
rv.addAll(_currentACKsResend);
return rv;
}
}
public void removeACKMessage(Long messageId) {
synchronized (_currentACKs) {
_currentACKs.remove(messageId);
_currentACKsResend.add(messageId);
_currentACKsResend.offer(messageId);
// trim down the resends
while (_currentACKsResend.size() > MAX_RESEND_ACKS)
_currentACKsResend.remove(0);
}
_lastACKSend = _context.clock().now();
_currentACKsResend.poll();
_lastACKSend = _context.clock().now();
}
/**
* The max number of acks we save to send as duplicates
*/
private static final int MAX_RESEND_ACKS = 16;
/**
* The number of duplicate acks sent in each message -
* Warning, this directly affects network overhead
* Was 16 but that's too much (64 bytes in a max 608 byte packet,
* and often much smaller)
* @since 0.7.13
*/
private static final int MAX_RESEND_ACKS_LARGE = 9;
/** for small MTU */
private static final int MAX_RESEND_ACKS_SMALL = 4;
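For scale, the per-packet overhead these limits imply at 4 bytes per resent ACK (illustration only):

// Illustration only: resend-ACK overhead per packet for the old and new limits.
class ResendAckOverheadSketch {
    public static void main(String[] args) {
        System.out.println(16 * 4); // old limit of 16: 64 bytes, too much in a 608-byte packet
        System.out.println(9 * 4);  // MAX_RESEND_ACKS_LARGE: 36 bytes
        System.out.println(4 * 4);  // MAX_RESEND_ACKS_SMALL: 16 bytes
    }
}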
/**
* grab a list of ACKBitfield instances, some of which may fully
@ -691,51 +718,75 @@ public class PeerState {
* will be unchanged if there are ACKs remaining.
*
*/
public List retrieveACKBitfields() { return retrieveACKBitfields(true); }
public List retrieveACKBitfields(boolean alwaysIncludeRetransmissions) {
List rv = null;
public List<ACKBitfield> retrieveACKBitfields() { return retrieveACKBitfields(true); }
public List<ACKBitfield> retrieveACKBitfields(boolean alwaysIncludeRetransmissions) {
List<ACKBitfield> rv = new ArrayList(MAX_RESEND_ACKS);
int bytesRemaining = countMaxACKData();
synchronized (_currentACKs) {
rv = new ArrayList(16); //_currentACKs.size());
int oldIndex = _currentACKsResend.size();
while ( (bytesRemaining >= 4) && (_currentACKs.size() > 0) ) {
Long val = (Long)_currentACKs.remove(0);
// Limit the overhead of all the resent acks when using small MTU
// 64 bytes in a 608-byte packet is too much...
// Send a random subset of all the queued resend acks.
int resendSize = _currentACKsResend.size();
int maxResendAcks;
if (bytesRemaining < MIN_MTU)
maxResendAcks = MAX_RESEND_ACKS_SMALL;
else
maxResendAcks = MAX_RESEND_ACKS_LARGE;
List<Long> randomResends = new ArrayList(_currentACKsResend);
// As explained above, we include the acks in any order
// since we are unlikely to get backed up -
// just take them using the Set iterator.
Iterator<Long> iter = _currentACKs.iterator();
while (bytesRemaining >= 4 && iter.hasNext()) {
Long val = iter.next();
iter.remove();
long id = val.longValue();
rv.add(new FullACKBitfield(id));
_currentACKsResend.add(val);
_currentACKsResend.offer(val);
bytesRemaining -= 4;
}
if (_currentACKs.size() <= 0)
if (_currentACKs.isEmpty())
_wantACKSendSince = -1;
if (alwaysIncludeRetransmissions || rv.size() > 0) {
// now repeat by putting in some old ACKs
for (int i = 0; (i < oldIndex) && (bytesRemaining >= 4); i++) {
Long cur = (Long)_currentACKsResend.get(i);
// randomly selected from the Resend queue.
// Maybe we should only resend each one a certain number of times...
int oldIndex = Math.min(resendSize, maxResendAcks);
if (oldIndex > 0 && oldIndex < resendSize)
Collections.shuffle(randomResends, _context.random());
iter = randomResends.iterator();
while (bytesRemaining >= 4 && oldIndex-- > 0 && iter.hasNext()) {
Long cur = iter.next();
long c = cur.longValue();
FullACKBitfield bf = new FullACKBitfield(c);
rv.add(bf);
bytesRemaining -= 4;
// try to avoid duplicates ??
// ACKsResend is not checked for dups at add time
//if (rv.contains(bf)) {
// iter.remove();
//} else {
rv.add(bf);
bytesRemaining -= 4;
//}
}
}
// trim down the resends
while (_currentACKsResend.size() > MAX_RESEND_ACKS)
_currentACKsResend.remove(0);
}
_currentACKsResend.poll();
int partialIncluded = 0;
if (bytesRemaining > 4) {
// ok, there's room to *try* to fit in some partial ACKs, so
// we should try to find some packets to partially ACK
// (preferably the ones which have the most received fragments)
List partial = new ArrayList();
List<ACKBitfield> partial = new ArrayList();
fetchPartialACKs(partial);
// we may not be able to use them all, but lets try...
for (int i = 0; (bytesRemaining > 4) && (i < partial.size()); i++) {
ACKBitfield bitfield = (ACKBitfield)partial.get(i);
ACKBitfield bitfield = partial.get(i);
int bytes = (bitfield.fragmentCount() / 7) + 1;
if (bytesRemaining > bytes + 4) { // msgId + bitfields
if (rv == null)
rv = new ArrayList(partial.size());
rv.add(bitfield);
bytesRemaining -= bytes + 4;
partialIncluded++;
@ -754,7 +805,7 @@ public class PeerState {
return rv;
}
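A worked example of the partial-ACK size check above, where a partial ACK costs a 4-byte message id plus one bitfield byte per 7 fragments (illustration only, with a hypothetical fragment count):

// Illustration only: size of a partial ACK for a 10-fragment message.
class PartialAckSizeSketch {
    public static void main(String[] args) {
        int fragmentCount = 10;                      // hypothetical partially-received message
        int bitfieldBytes = (fragmentCount / 7) + 1; // 2 bitfield bytes
        System.out.println(bitfieldBytes + 4);       // + 4-byte message id = 6 bytes total
    }
}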
void fetchPartialACKs(List rv) {
void fetchPartialACKs(List<ACKBitfield> rv) {
InboundMessageState states[] = null;
int curState = 0;
synchronized (_inboundMessages) {
@ -762,9 +813,8 @@ public class PeerState {
if (numMessages <= 0)
return;
// todo: make this a list instead of a map, so we can iterate faster w/out the memory overhead?
int remaining = _inboundMessages.size();
for (Iterator iter = _inboundMessages.values().iterator(); remaining > 0; remaining--) {
InboundMessageState state = (InboundMessageState)iter.next();
for (Iterator<InboundMessageState> iter = _inboundMessages.values().iterator(); iter.hasNext(); ) {
InboundMessageState state = iter.next();
if (state.isExpired()) {
//if (_context instanceof RouterContext)
// ((RouterContext)_context).messageHistory().droppedInboundMessage(state.getMessageId(), state.getFrom(), "expired partially received: " + state.toString());
@ -795,6 +845,13 @@ public class PeerState {
public boolean received(int fragmentNum) { return true; }
public boolean receivedComplete() { return true; }
@Override
public int hashCode() { return (int) _msgId; }
@Override
public boolean equals(Object o) {
if (!(o instanceof FullACKBitfield)) return false;
return _msgId == ((ACKBitfield)o).getMessageId();
}
@Override
public String toString() { return "Full ACK of " + _msgId; }
}
@ -825,8 +882,8 @@ public class PeerState {
}
} else {
int allow = _concurrentMessagesAllowed - 1;
if (allow < 8)
allow = 8;
if (allow < MIN_CONCURRENT_MSGS)
allow = MIN_CONCURRENT_MSGS;
_concurrentMessagesAllowed = allow;
}
if (_sendWindowBytes > MAX_SEND_WINDOW_BYTES)
@ -977,10 +1034,10 @@ public class PeerState {
public long getWantedACKSendSince() { return _wantACKSendSince; }
public boolean unsentACKThresholdReached() {
int threshold = countMaxACKData() / 4;
synchronized (_currentACKs) {
return _currentACKs.size() >= threshold;
}
return _currentACKs.size() >= threshold;
}
/** @return MTU - 83 */
private int countMaxACKData() {
return _mtu
- IP_HEADER_SIZE
@ -1013,7 +1070,7 @@ public class PeerState {
state.setPeer(this);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Adding to " + _remotePeer.toBase64() + ": " + state.getMessageId());
List msgs = _outboundMessages;
List<OutboundMessageState> msgs = _outboundMessages;
if (msgs == null) return 0;
int rv = 0;
boolean fail = false;
@ -1068,12 +1125,12 @@ public class PeerState {
public void dropOutbound() {
//if (_dead) return;
_dead = true;
List msgs = _outboundMessages;
List<OutboundMessageState> msgs = _outboundMessages;
//_outboundMessages = null;
_retransmitter = null;
if (msgs != null) {
int sz = 0;
List tempList = null;
List<OutboundMessageState> tempList = null;
synchronized (msgs) {
sz = msgs.size();
if (sz > 0) {
@ -1082,12 +1139,14 @@ public class PeerState {
}
}
for (int i = 0; i < sz; i++)
_transport.failed((OutboundMessageState)tempList.get(i), false);
_transport.failed(tempList.get(i), false);
}
// so the ACKSender will drop this peer from its queue
_wantACKSendSince = -1;
}
public int getOutboundMessageCount() {
List msgs = _outboundMessages;
List<OutboundMessageState> msgs = _outboundMessages;
if (_dead) return 0;
if (msgs != null) {
synchronized (msgs) {
@ -1104,17 +1163,17 @@ public class PeerState {
*/
public int finishMessages() {
int rv = 0;
List msgs = _outboundMessages;
List<OutboundMessageState> msgs = _outboundMessages;
if (_dead) {
dropOutbound();
return 0;
}
List succeeded = null;
List failed = null;
List<OutboundMessageState> succeeded = null;
List<OutboundMessageState> failed = null;
synchronized (msgs) {
int size = msgs.size();
for (int i = 0; i < size; i++) {
OutboundMessageState state = (OutboundMessageState)msgs.get(i);
OutboundMessageState state = msgs.get(i);
if (state.isComplete()) {
msgs.remove(i);
i--;
@ -1147,7 +1206,7 @@ public class PeerState {
}
for (int i = 0; succeeded != null && i < succeeded.size(); i++) {
OutboundMessageState state = (OutboundMessageState)succeeded.get(i);
OutboundMessageState state = succeeded.get(i);
_transport.succeeded(state);
state.releaseResources();
OutNetMessage msg = state.getMessage();
@ -1156,7 +1215,7 @@ public class PeerState {
}
for (int i = 0; failed != null && i < failed.size(); i++) {
OutboundMessageState state = (OutboundMessageState)failed.get(i);
OutboundMessageState state = failed.get(i);
OutNetMessage msg = state.getMessage();
if (msg != null) {
msg.timestamp("expired in the active pool");
@ -1180,12 +1239,12 @@ public class PeerState {
*/
public OutboundMessageState allocateSend() {
int total = 0;
List msgs = _outboundMessages;
List<OutboundMessageState> msgs = _outboundMessages;
if (_dead) return null;
synchronized (msgs) {
int size = msgs.size();
for (int i = 0; i < size; i++) {
OutboundMessageState state = (OutboundMessageState)msgs.get(i);
OutboundMessageState state = msgs.get(i);
if (locked_shouldSend(state)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Allocate sending to " + _remotePeer.toBase64() + ": " + state.getMessageId());
@ -1217,7 +1276,7 @@ public class PeerState {
public int getNextDelay() {
int rv = -1;
long now = _context.clock().now();
List msgs = _outboundMessages;
List<OutboundMessageState> msgs = _outboundMessages;
if (_dead) return -1;
synchronized (msgs) {
if (_retransmitter != null) {
@ -1229,7 +1288,7 @@ public class PeerState {
}
int size = msgs.size();
for (int i = 0; i < size; i++) {
OutboundMessageState state = (OutboundMessageState)msgs.get(i);
OutboundMessageState state = msgs.get(i);
int delay = (int)(state.getNextSendTime() - now);
if (delay <= 0)
delay = 1;
@ -1346,12 +1405,12 @@ public class PeerState {
public int acked(long messageId) {
OutboundMessageState state = null;
List msgs = _outboundMessages;
List<OutboundMessageState> msgs = _outboundMessages;
if (_dead) return 0;
synchronized (msgs) {
int sz = msgs.size();
for (int i = 0; i < sz; i++) {
state = (OutboundMessageState)msgs.get(i);
state = msgs.get(i);
if (state.getMessageId() == messageId) {
msgs.remove(i);
break;
@ -1407,13 +1466,13 @@ public class PeerState {
return;
}
List msgs = _outboundMessages;
List<OutboundMessageState> msgs = _outboundMessages;
OutboundMessageState state = null;
boolean isComplete = false;
synchronized (msgs) {
for (int i = 0; i < msgs.size(); i++) {
state = (OutboundMessageState)msgs.get(i);
state = msgs.get(i);
if (state.getMessageId() == bitfield.getMessageId()) {
boolean complete = state.acked(bitfield);
if (complete) {
@ -1486,26 +1545,23 @@ public class PeerState {
_sendWindowBytes = oldPeer._sendWindowBytes;
oldPeer._dead = true;
List tmp = new ArrayList();
synchronized (oldPeer._currentACKs) {
tmp.addAll(oldPeer._currentACKs);
oldPeer._currentACKs.clear();
}
List<Long> tmp = new ArrayList();
tmp.addAll(oldPeer._currentACKs);
oldPeer._currentACKs.clear();
if (!_dead) {
synchronized (_currentACKs) { _currentACKs.addAll(tmp); }
_currentACKs.addAll(tmp);
}
tmp.clear();
synchronized (oldPeer._currentACKsResend) {
tmp.addAll(oldPeer._currentACKsResend);
oldPeer._currentACKsResend.clear();
}
tmp.addAll(oldPeer._currentACKsResend);
oldPeer._currentACKsResend.clear();
if (!_dead) {
synchronized (_currentACKsResend) { _currentACKsResend.addAll(tmp); }
_currentACKsResend.addAll(tmp);
}
tmp.clear();
Map msgs = new HashMap();
Map<Long, InboundMessageState> msgs = new HashMap();
synchronized (oldPeer._inboundMessages) {
msgs.putAll(oldPeer._inboundMessages);
oldPeer._inboundMessages.clear();
@ -1515,20 +1571,20 @@ public class PeerState {
}
msgs.clear();
List<OutboundMessageState> tmp2 = new ArrayList();
OutboundMessageState retransmitter = null;
synchronized (oldPeer._outboundMessages) {
tmp.addAll(oldPeer._outboundMessages);
tmp2.addAll(oldPeer._outboundMessages);
oldPeer._outboundMessages.clear();
retransmitter = oldPeer._retransmitter;
oldPeer._retransmitter = null;
}
if (!_dead) {
synchronized (_outboundMessages) {
_outboundMessages.addAll(tmp);
_outboundMessages.addAll(tmp2);
_retransmitter = retransmitter;
}
}
tmp.clear();
}
/*

View File

@ -2,11 +2,10 @@ package net.i2p.router.transport.udp;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.data.Base64;
import net.i2p.data.DataHelper;
@ -102,7 +101,7 @@ class PeerTestManager {
private PeerTestState _currentTest;
private boolean _currentTestComplete;
/** as Alice */
private List<Long> _recentTests;
private Queue<Long> _recentTests;
/** longest we will keep track of a Charlie nonce for */
private static final int MAX_CHARLIE_LIFETIME = 10*1000;
@ -116,8 +115,8 @@ class PeerTestManager {
_context = context;
_transport = transport;
_log = context.logManager().getLog(PeerTestManager.class);
_activeTests = new HashMap(64);
_recentTests = Collections.synchronizedList(new ArrayList(16));
_activeTests = new ConcurrentHashMap();
_recentTests = new LinkedBlockingQueue();
_packetBuilder = new PacketBuilder(context, transport);
_currentTest = null;
_currentTestComplete = false;
@ -155,8 +154,8 @@ class PeerTestManager {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Running test with bob = " + bobIP + ":" + bobPort + " " + test.getNonce());
while (_recentTests.size() > 16)
_recentTests.remove(0);
_recentTests.add(new Long(test.getNonce()));
_recentTests.poll();
_recentTests.offer(new Long(test.getNonce()));
sendTestToBob();
@ -435,10 +434,7 @@ class PeerTestManager {
testInfo.readIP(testIP, 0);
}
PeerTestState state = null;
synchronized (_activeTests) {
state = (PeerTestState)_activeTests.get(new Long(nonce));
}
PeerTestState state = _activeTests.get(new Long(nonce));
if (state == null) {
if ( (testIP == null) || (testPort <= 0) ) {
@ -542,9 +538,7 @@ class PeerTestManager {
_log.debug("Receive from bob (" + from + ") as charlie, sending back to bob and sending to alice @ " + aliceIP + ":" + alicePort);
if (isNew) {
synchronized (_activeTests) {
_activeTests.put(new Long(nonce), state);
}
_activeTests.put(new Long(nonce), state);
SimpleScheduler.getInstance().addEvent(new RemoveTest(nonce), MAX_CHARLIE_LIFETIME);
}
@ -623,9 +617,7 @@ class PeerTestManager {
}
if (isNew) {
synchronized (_activeTests) {
_activeTests.put(new Long(nonce), state);
}
_activeTests.put(new Long(nonce), state);
SimpleScheduler.getInstance().addEvent(new RemoveTest(nonce), MAX_CHARLIE_LIFETIME);
}
@ -701,9 +693,7 @@ class PeerTestManager {
_nonce = nonce;
}
public void timeReached() {
synchronized (_activeTests) {
_activeTests.remove(new Long(_nonce));
}
}
}
}
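A generic sketch (not the project's code) of the bounded-queue idiom used above for _recentTests: evict the oldest entry before offering a new one, so the queue stays at roughly 16 nonces.

// Illustration only: keep at most MAX recent entries, evicting the oldest first.
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;

class RecentNoncesSketch {
    private static final int MAX = 16;
    private final Queue<Long> recent = new LinkedBlockingQueue<Long>();

    void remember(long nonce) {
        while (recent.size() > MAX)
            recent.poll();                     // drop the oldest nonce
        recent.offer(Long.valueOf(nonce));
    }

    boolean seen(long nonce) {
        return recent.contains(Long.valueOf(nonce));
    }
}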

View File

@ -33,71 +33,71 @@ class PeerTestState {
public static final short BOB = 2;
public static final short CHARLIE = 3;
public synchronized long getNonce() { return _testNonce; }
public synchronized void setNonce(long nonce) { _testNonce = nonce; }
public long getNonce() { return _testNonce; }
public void setNonce(long nonce) { _testNonce = nonce; }
/** Are we Alice, bob, or Charlie. */
public synchronized short getOurRole() { return _ourRole; }
public synchronized void setOurRole(short role) { _ourRole = role; }
public short getOurRole() { return _ourRole; }
public void setOurRole(short role) { _ourRole = role; }
/**
* If we are Alice, this will contain the IP that Bob says we
* can be reached at - the IP Charlie says we can be reached
* at is _aliceIPFromCharlie
*
*/
public synchronized InetAddress getAliceIP() { return _aliceIP; }
public synchronized void setAliceIP(InetAddress ip) { _aliceIP = ip; }
public synchronized InetAddress getBobIP() { return _bobIP; }
public synchronized void setBobIP(InetAddress ip) { _bobIP = ip; }
public synchronized InetAddress getCharlieIP() { return _charlieIP; }
public synchronized void setCharlieIP(InetAddress ip) { _charlieIP = ip; }
public synchronized InetAddress getAliceIPFromCharlie() { return _aliceIPFromCharlie; }
public synchronized void setAliceIPFromCharlie(InetAddress ip) { _aliceIPFromCharlie = ip; }
public InetAddress getAliceIP() { return _aliceIP; }
public void setAliceIP(InetAddress ip) { _aliceIP = ip; }
public InetAddress getBobIP() { return _bobIP; }
public void setBobIP(InetAddress ip) { _bobIP = ip; }
public InetAddress getCharlieIP() { return _charlieIP; }
public void setCharlieIP(InetAddress ip) { _charlieIP = ip; }
public InetAddress getAliceIPFromCharlie() { return _aliceIPFromCharlie; }
public void setAliceIPFromCharlie(InetAddress ip) { _aliceIPFromCharlie = ip; }
/**
* If we are Alice, this will contain the port that Bob says we
* can be reached at - the port Charlie says we can be reached
* at is _alicePortFromCharlie
*
*/
public synchronized int getAlicePort() { return _alicePort; }
public synchronized void setAlicePort(int alicePort) { _alicePort = alicePort; }
public synchronized int getBobPort() { return _bobPort; }
public synchronized void setBobPort(int bobPort) { _bobPort = bobPort; }
public synchronized int getCharliePort() { return _charliePort; }
public synchronized void setCharliePort(int charliePort) { _charliePort = charliePort; }
public int getAlicePort() { return _alicePort; }
public void setAlicePort(int alicePort) { _alicePort = alicePort; }
public int getBobPort() { return _bobPort; }
public void setBobPort(int bobPort) { _bobPort = bobPort; }
public int getCharliePort() { return _charliePort; }
public void setCharliePort(int charliePort) { _charliePort = charliePort; }
public synchronized int getAlicePortFromCharlie() { return _alicePortFromCharlie; }
public synchronized void setAlicePortFromCharlie(int alicePortFromCharlie) { _alicePortFromCharlie = alicePortFromCharlie; }
public int getAlicePortFromCharlie() { return _alicePortFromCharlie; }
public void setAlicePortFromCharlie(int alicePortFromCharlie) { _alicePortFromCharlie = alicePortFromCharlie; }
public synchronized SessionKey getAliceIntroKey() { return _aliceIntroKey; }
public synchronized void setAliceIntroKey(SessionKey key) { _aliceIntroKey = key; }
public synchronized SessionKey getCharlieIntroKey() { return _charlieIntroKey; }
public synchronized void setCharlieIntroKey(SessionKey key) { _charlieIntroKey = key; }
public synchronized SessionKey getBobCipherKey() { return _bobCipherKey; }
public synchronized void setBobCipherKey(SessionKey key) { _bobCipherKey = key; }
public synchronized SessionKey getBobMACKey() { return _bobMACKey; }
public synchronized void setBobMACKey(SessionKey key) { _bobMACKey = key; }
public SessionKey getAliceIntroKey() { return _aliceIntroKey; }
public void setAliceIntroKey(SessionKey key) { _aliceIntroKey = key; }
public SessionKey getCharlieIntroKey() { return _charlieIntroKey; }
public void setCharlieIntroKey(SessionKey key) { _charlieIntroKey = key; }
public SessionKey getBobCipherKey() { return _bobCipherKey; }
public void setBobCipherKey(SessionKey key) { _bobCipherKey = key; }
public SessionKey getBobMACKey() { return _bobMACKey; }
public void setBobMACKey(SessionKey key) { _bobMACKey = key; }
/** when did this test begin? */
public synchronized long getBeginTime() { return _beginTime; }
public synchronized void setBeginTime(long when) { _beginTime = when; }
public long getBeginTime() { return _beginTime; }
public void setBeginTime(long when) { _beginTime = when; }
/** when did we last send out a packet? */
public synchronized long getLastSendTime() { return _lastSendTime; }
public synchronized void setLastSendTime(long when) { _lastSendTime = when; }
public long getLastSendTime() { return _lastSendTime; }
public void setLastSendTime(long when) { _lastSendTime = when; }
/** when did we last hear from alice? */
public synchronized long getReceiveAliceTime() { return _receiveAliceTime; }
public synchronized void setReceiveAliceTime(long when) { _receiveAliceTime = when; }
public long getReceiveAliceTime() { return _receiveAliceTime; }
public void setReceiveAliceTime(long when) { _receiveAliceTime = when; }
/** when did we last hear from bob? */
public synchronized long getReceiveBobTime() { return _receiveBobTime; }
public synchronized void setReceiveBobTime(long when) { _receiveBobTime = when; }
public long getReceiveBobTime() { return _receiveBobTime; }
public void setReceiveBobTime(long when) { _receiveBobTime = when; }
/** when did we last hear from charlie? */
public synchronized long getReceiveCharlieTime() { return _receiveCharlieTime; }
public synchronized void setReceiveCharlieTime(long when) { _receiveCharlieTime = when; }
public long getReceiveCharlieTime() { return _receiveCharlieTime; }
public void setReceiveCharlieTime(long when) { _receiveCharlieTime = when; }
public int getPacketsRelayed() { return _packetsRelayed; }
public void incrementPacketsRelayed() { ++_packetsRelayed; }
@Override
public synchronized String toString() {
public String toString() {
StringBuilder buf = new StringBuilder(512);
buf.append("Role: ");
if (_ourRole == ALICE) buf.append("Alice");

View File

@ -2,9 +2,9 @@ package net.i2p.router.transport.udp;
import java.net.DatagramPacket;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.I2PAppContext;
import net.i2p.data.DataHelper;
@ -40,15 +40,17 @@ public class UDPPacket {
private int _validateCount;
// private boolean _isInbound;
private static final List _packetCache;
private static final Queue<UDPPacket> _packetCache;
private static final boolean CACHE = true;
private static final int CACHE_SIZE = 64;
static {
_packetCache = new ArrayList(256);
if (CACHE)
_packetCache = new LinkedBlockingQueue(CACHE_SIZE);
else
_packetCache = null;
_log = I2PAppContext.getGlobalContext().logManager().getLog(UDPPacket.class);
}
private static final boolean CACHE = true; // TODO: support caching to cut churn down a /lot/
private static final int CACHE_SIZE = 64;
static final int MAX_PACKET_SIZE = 2048;
public static final int IV_SIZE = 16;
public static final int MAC_SIZE = 16;
@ -121,7 +123,9 @@ public class UDPPacket {
private int _messageType;
private int _fragmentCount;
/** only for debugging and stats, does not go on the wire */
int getMessageType() { return _messageType; }
/** only for debugging and stats, does not go on the wire */
void setMessageType(int type) { _messageType = type; }
int getFragmentCount() { return _fragmentCount; }
void setFragmentCount(int count) { _fragmentCount = count; }
@ -238,7 +242,7 @@ public class UDPPacket {
@Override
public String toString() {
verifyNotReleased();
StringBuilder buf = new StringBuilder(64);
StringBuilder buf = new StringBuilder(256);
buf.append(_packet.getLength());
buf.append(" byte packet with ");
buf.append(_packet.getAddress().getHostAddress()).append(":");
@ -256,12 +260,7 @@ public class UDPPacket {
public static UDPPacket acquire(I2PAppContext ctx, boolean inbound) {
UDPPacket rv = null;
if (CACHE) {
synchronized (_packetCache) {
if (_packetCache.size() > 0) {
rv = (UDPPacket)_packetCache.remove(0);
}
}
rv = _packetCache.poll();
if (rv != null)
rv.init(ctx, inbound);
}
@ -284,11 +283,7 @@ public class UDPPacket {
//_dataCache.release(_dataBuf);
if (!CACHE)
return;
synchronized (_packetCache) {
if (_packetCache.size() <= CACHE_SIZE) {
_packetCache.add(this);
}
}
_packetCache.offer(this);
}
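A generic sketch (not the project's code) of the lock-free packet cache pattern above: acquire() reuses a pooled object when one is available, release() returns it, and the bounded queue silently discards extras instead of blocking.

// Illustration only: a bounded object pool backed by a LinkedBlockingQueue.
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;

class PacketPoolSketch {
    private static final int CACHE_SIZE = 64;
    private static final Queue<byte[]> POOL = new LinkedBlockingQueue<byte[]>(CACHE_SIZE);

    static byte[] acquire() {
        byte[] buf = POOL.poll();              // reuse a cached buffer if available
        return (buf != null) ? buf : new byte[2048];
    }

    static void release(byte[] buf) {
        POOL.offer(buf);                       // silently dropped when the pool is full
    }
}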
private void verifyNotReleased() {

View File

@ -402,7 +402,7 @@ public class UDPPacketReader {
@Override
public String toString() {
StringBuilder buf = new StringBuilder(256);
StringBuilder buf = new StringBuilder(512);
long msAgo = _context.clock().now() - readTimestamp()*1000;
buf.append("Data packet sent ").append(msAgo).append("ms ago ");
buf.append("IV ");

View File

@ -2,8 +2,8 @@ package net.i2p.router.transport.udp;
import java.io.IOException;
import java.net.DatagramSocket;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.FIFOBandwidthLimiter;
@ -24,19 +24,20 @@ public class UDPReceiver {
private Log _log;
private DatagramSocket _socket;
private String _name;
private final List _inboundQueue;
private final BlockingQueue<UDPPacket> _inboundQueue;
private boolean _keepRunning;
private Runner _runner;
private UDPTransport _transport;
// private static int __id;
private static int __id;
private int _id;
private static final int TYPE_POISON = -99999;
public UDPReceiver(RouterContext ctx, UDPTransport transport, DatagramSocket socket, String name) {
_context = ctx;
_log = ctx.logManager().getLog(UDPReceiver.class);
_id++;
_id = ++__id;
_name = name;
_inboundQueue = new ArrayList(128);
_inboundQueue = new LinkedBlockingQueue();
_socket = socket;
_transport = transport;
_runner = new Runner();
@ -50,17 +51,22 @@ public class UDPReceiver {
public void startup() {
//adjustDropProbability();
_keepRunning = true;
I2PThread t = new I2PThread(_runner, _name + "." + _id);
t.setDaemon(true);
I2PThread t = new I2PThread(_runner, _name + '.' + _id, true);
t.start();
}
public void shutdown() {
_keepRunning = false;
synchronized (_inboundQueue) {
_inboundQueue.clear();
_inboundQueue.notifyAll();
_inboundQueue.clear();
UDPPacket poison = UDPPacket.acquire(_context, false);
poison.setMessageType(TYPE_POISON);
_inboundQueue.offer(poison);
for (int i = 1; i <= 5 && !_inboundQueue.isEmpty(); i++) {
try {
Thread.sleep(i * 50);
} catch (InterruptedException ie) {}
}
_inboundQueue.clear();
}
/*********
@ -96,6 +102,7 @@ public class UDPReceiver {
private static final int ARTIFICIAL_DELAY_BASE = 0; //600;
**********/
/** @return zero (was queue size) */
private int receive(UDPPacket packet) {
/*********
//adjustDropProbability();
@ -126,7 +133,12 @@ public class UDPReceiver {
return doReceive(packet);
}
/** @return zero (was queue size) */
private final int doReceive(UDPPacket packet) {
if (!_keepRunning)
return 0;
if (_log.shouldLog(Log.INFO))
_log.info("Received: " + packet);
@ -143,26 +155,25 @@ public class UDPReceiver {
boolean rejected = false;
int queueSize = 0;
long headPeriod = 0;
synchronized (_inboundQueue) {
queueSize = _inboundQueue.size();
if (queueSize > 0) {
headPeriod = ((UDPPacket)_inboundQueue.get(0)).getLifetime();
UDPPacket head = _inboundQueue.peek();
if (head != null) {
headPeriod = head.getLifetime();
if (headPeriod > MAX_QUEUE_PERIOD) {
rejected = true;
_inboundQueue.notifyAll();
}
}
if (!rejected) {
_inboundQueue.add(packet);
_inboundQueue.notifyAll();
return queueSize + 1;
_inboundQueue.offer(packet);
//return queueSize + 1;
return 0;
}
}
// rejected
packet.release();
_context.statManager().addRateData("udp.droppedInbound", queueSize, headPeriod);
if (_log.shouldLog(Log.WARN)) {
queueSize = _inboundQueue.size();
StringBuilder msg = new StringBuilder();
msg.append("Dropping inbound packet with ");
msg.append(queueSize);
@ -188,21 +199,15 @@ public class UDPReceiver {
*/
public UDPPacket receiveNext() {
UDPPacket rv = null;
int remaining = 0;
while (_keepRunning) {
synchronized (_inboundQueue) {
if (_inboundQueue.size() <= 0)
try { _inboundQueue.wait(); } catch (InterruptedException ie) {}
if (_inboundQueue.size() > 0) {
rv = (UDPPacket)_inboundQueue.remove(0);
remaining = _inboundQueue.size();
if (remaining > 0)
_inboundQueue.notifyAll();
break;
}
}
//int remaining = 0;
while (_keepRunning && rv == null) {
try {
rv = _inboundQueue.take();
} catch (InterruptedException ie) {}
if (rv != null && rv.getMessageType() == TYPE_POISON)
return null;
}
_context.statManager().addRateData("udp.receiveRemaining", remaining, 0);
//_context.statManager().addRateData("udp.receiveRemaining", remaining, 0);
return rv;
}
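A generic sketch (not the project's code) of the poison-message shutdown pattern this file now uses: shutdown() clears the queue and offers a sentinel, and the consumer blocked in take() returns null as soon as it sees the sentinel.

// Illustration only: poison-pill shutdown for a single blocking consumer.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

class PoisonQueueSketch {
    private static final String POISON = "POISON";
    private final BlockingQueue<String> queue = new LinkedBlockingQueue<String>();
    private volatile boolean keepRunning = true;

    void shutdown() {
        keepRunning = false;
        queue.clear();
        queue.offer(POISON);                   // wake the consumer blocked in take()
    }

    String receiveNext() {
        String item = null;
        while (keepRunning && item == null) {
            try {
                item = queue.take();
            } catch (InterruptedException ie) {}
            if (POISON.equals(item))
                return null;                   // sentinel seen, stop delivering
        }
        return item;
    }
}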

View File

@ -3,8 +3,8 @@ package net.i2p.router.transport.udp;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.router.RouterContext;
import net.i2p.router.transport.FIFOBandwidthLimiter;
@ -20,16 +20,17 @@ public class UDPSender {
private Log _log;
private DatagramSocket _socket;
private String _name;
private final List _outboundQueue;
private final BlockingQueue<UDPPacket> _outboundQueue;
private boolean _keepRunning;
private Runner _runner;
private static final int TYPE_POISON = 99999;
private static final int MAX_QUEUED = 4;
//private static final int MAX_QUEUED = 4;
public UDPSender(RouterContext ctx, DatagramSocket socket, String name) {
_context = ctx;
_log = ctx.logManager().getLog(UDPSender.class);
_outboundQueue = new ArrayList(128);
_outboundQueue = new LinkedBlockingQueue();
_socket = socket;
_runner = new Runner();
_name = name;
@ -44,49 +45,40 @@ public class UDPSender {
// used in RouterWatchdog
_context.statManager().createRateStat("udp.sendException", "How frequently we fail to send a packet (likely due to a windows exception)", "udp", new long[] { 60*1000, 10*60*1000 });
_context.statManager().createRateStat("udp.sendPacketSize.1", "db store message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.2", "db lookup message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.3", "db search reply message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.6", "tunnel create message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.7", "tunnel create status message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.10", "delivery status message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.11", "garlic message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.16", "date message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.18", "tunnel data message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.19", "tunnel gateway message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.20", "data message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.21", "tunnel build", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.22", "tunnel build reply", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.20", "data message size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.42", "ack-only packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.43", "hole punch packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.44", "relay response packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.45", "relay intro packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.46", "relay request packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.47", "peer test charlie to bob packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.48", "peer test bob to charlie packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.49", "peer test to alice packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.50", "peer test from alice packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.51", "session confirmed packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.52", "session request packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize.53", "session created packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_ACK, "ack-only packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_PUNCH, "hole punch packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_RESP, "relay response packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_INTRO, "relay intro packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_RREQ, "relay request packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_TCB, "peer test charlie to bob packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_TBC, "peer test bob to charlie packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_TTA, "peer test to alice packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_TFA, "peer test from alice packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_CONF, "session confirmed packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_SREQ, "session request packet size", "udp", UDPTransport.RATES);
_context.statManager().createRateStat("udp.sendPacketSize." + PacketBuilder.TYPE_CREAT, "session created packet size", "udp", UDPTransport.RATES);
}
public void startup() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Starting the runner: " + _name);
_keepRunning = true;
I2PThread t = new I2PThread(_runner, _name);
t.setDaemon(true);
I2PThread t = new I2PThread(_runner, _name, true);
t.start();
}
public void shutdown() {
_keepRunning = false;
synchronized (_outboundQueue) {
_outboundQueue.clear();
_outboundQueue.notifyAll();
_outboundQueue.clear();
UDPPacket poison = UDPPacket.acquire(_context, false);
poison.setMessageType(TYPE_POISON);
_outboundQueue.offer(poison);
for (int i = 1; i <= 5 && !_outboundQueue.isEmpty(); i++) {
try {
Thread.sleep(i * 50);
} catch (InterruptedException ie) {}
}
_outboundQueue.clear();
}
public DatagramSocket updateListeningPort(DatagramSocket socket, int newPort) {
@ -98,10 +90,12 @@ public class UDPSender {
* Add the packet to the queue. This may block until there is space
* available, if requested, otherwise it returns immediately
*
* @param blockTime how long to block
* @param blockTime how long to block IGNORED
* @return number of packets queued
* @deprecated use add(packet)
*/
public int add(UDPPacket packet, int blockTime) {
/********
//long expiration = _context.clock().now() + blockTime;
int remaining = -1;
long lifetime = -1;
@ -124,13 +118,12 @@ public class UDPSender {
}
}
//if (true || (_outboundQueue.size() < MAX_QUEUED)) {
if (true || (_outboundQueue.size() < MAX_QUEUED)) {
lifetime = packet.getLifetime();
_outboundQueue.add(packet);
added = true;
remaining = _outboundQueue.size();
_outboundQueue.notifyAll();
/*****
} else {
long remainingTime = expiration - _context.clock().now();
if (remainingTime > 0) {
@ -141,7 +134,6 @@ public class UDPSender {
}
lifetime = packet.getLifetime();
}
*****/
}
//} catch (InterruptedException ie) {}
}
@ -153,42 +145,26 @@ public class UDPSender {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Added the packet onto the queue with " + remaining + " remaining and a lifetime of " + lifetime);
return remaining;
********/
return add(packet);
}
private static final int MAX_HEAD_LIFETIME = 1000;
/**
*
* @return number of packets in the queue
* Put it on the queue
* @return ZERO (used to be number of packets in the queue)
*/
public int add(UDPPacket packet) {
if (packet == null) return 0;
if (packet == null || !_keepRunning) return 0;
int size = 0;
long lifetime = -1;
int removed = 0;
synchronized (_outboundQueue) {
lifetime = packet.getLifetime();
UDPPacket head = null;
if (_outboundQueue.size() > 0) {
head = (UDPPacket)_outboundQueue.get(0);
while (head.getLifetime() > MAX_HEAD_LIFETIME) {
_outboundQueue.remove(0);
removed++;
if (_outboundQueue.size() > 0)
head = (UDPPacket)_outboundQueue.get(0);
else
break;
}
}
_outboundQueue.add(packet);
_outboundQueue.offer(packet);
//size = _outboundQueue.size();
//_context.statManager().addRateData("udp.sendQueueSize", size, lifetime);
if (_log.shouldLog(Log.DEBUG)) {
size = _outboundQueue.size();
_outboundQueue.notifyAll();
_log.debug("Added the packet onto the queue with " + size + " remaining and a lifetime of " + packet.getLifetime());
}
_context.statManager().addRateData("udp.sendQueueSize", size, lifetime);
if (removed > 0)
_context.statManager().addRateData("udp.sendQueueTrimmed", removed, size);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Added the packet onto the queue with " + size + " remaining and a lifetime of " + lifetime);
return size;
}
@ -227,7 +203,8 @@ public class UDPSender {
//_log.debug("Sending packet: (size="+size + "/"+size2 +")\nraw: " + Base64.encode(packet.getPacket().getData(), 0, size));
}
_context.statManager().addRateData("udp.sendPacketSize." + packet.getMessageType(), size, packet.getFragmentCount());
if (packet.getMessageType() >= PacketBuilder.TYPE_FIRST)
_context.statManager().addRateData("udp.sendPacketSize." + packet.getMessageType(), size, packet.getFragmentCount());
//packet.getPacket().setLength(size);
try {
@ -267,20 +244,17 @@ public class UDPSender {
_log.debug("Stop sending...");
}
/** @return next packet in queue. Will discard any packet older than MAX_HEAD_LIFETIME */
private UDPPacket getNextPacket() {
UDPPacket packet = null;
while ( (_keepRunning) && (packet == null) ) {
while ( (_keepRunning) && (packet == null || packet.getLifetime() > MAX_HEAD_LIFETIME) ) {
if (packet != null)
_context.statManager().addRateData("udp.sendQueueTrimmed", 1, 0);
try {
synchronized (_outboundQueue) {
if (_outboundQueue.size() <= 0) {
_outboundQueue.notifyAll();
_outboundQueue.wait();
} else {
packet = (UDPPacket)_outboundQueue.remove(0);
_outboundQueue.notifyAll();
}
}
packet = _outboundQueue.take();
} catch (InterruptedException ie) {}
if (packet != null && packet.getMessageType() == TYPE_POISON)
return null;
}
return packet;
}

View File

@ -13,8 +13,10 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
import java.util.Vector;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.data.DataHelper;
import net.i2p.data.Hash;
@ -31,6 +33,7 @@ import net.i2p.router.RouterContext;
import net.i2p.router.transport.Transport;
import net.i2p.router.transport.TransportBid;
import net.i2p.router.transport.TransportImpl;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.Log;
import net.i2p.util.SimpleScheduler;
import net.i2p.util.SimpleTimer;
@ -75,8 +78,11 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
/** introduction key */
private SessionKey _introKey;
/** list of RemoteHostId for peers whose packets we want to drop outright */
private final List<RemoteHostId> _dropList;
/**
* List of RemoteHostId for peers whose packets we want to drop outright
* This is only for old network IDs (pre-0.6.1.10), so it isn't really used now.
*/
private final Set<RemoteHostId> _dropList;
private int _expireTimeout;
@ -167,9 +173,9 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
super(ctx);
_context = ctx;
_log = ctx.logManager().getLog(UDPTransport.class);
_peersByIdent = new HashMap(128);
_peersByRemoteHost = new HashMap(128);
_dropList = new ArrayList(256);
_peersByIdent = new ConcurrentHashMap(128);
_peersByRemoteHost = new ConcurrentHashMap(128);
_dropList = new ConcurrentHashSet(2);
_endpoint = null;
// See comments in DQAT.java
@ -608,9 +614,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
* if no state exists
*/
PeerState getPeerState(RemoteHostId hostInfo) {
synchronized (_peersByRemoteHost) {
return _peersByRemoteHost.get(hostInfo);
}
}
/**
@ -618,9 +622,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
* if no state exists
*/
public PeerState getPeerState(Hash remotePeer) {
synchronized (_peersByIdent) {
return _peersByIdent.get(remotePeer);
}
}
/**
@ -697,14 +699,12 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
long oldEstablishedOn = -1;
PeerState oldPeer = null;
if (remotePeer != null) {
synchronized (_peersByIdent) {
oldPeer = _peersByIdent.put(remotePeer, peer);
if ( (oldPeer != null) && (oldPeer != peer) ) {
// transfer over the old state/inbound message fragments/etc
peer.loadFrom(oldPeer);
oldEstablishedOn = oldPeer.getKeyEstablishedTime();
}
}
}
if (oldPeer != null) {
@ -717,13 +717,11 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
RemoteHostId remoteId = peer.getRemoteHostId();
if (remoteId == null) return false;
synchronized (_peersByRemoteHost) {
oldPeer = _peersByRemoteHost.put(remoteId, peer);
if ( (oldPeer != null) && (oldPeer != peer) ) {
// transfer over the old state/inbound message fragments/etc
peer.loadFrom(oldPeer);
oldEstablishedOn = oldPeer.getKeyEstablishedTime();
}
}
if (oldPeer != null) {
@ -773,6 +771,8 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
DatabaseStoreMessage dsm = (DatabaseStoreMessage)inMsg;
if ( (dsm.getRouterInfo() != null) &&
(dsm.getRouterInfo().getNetworkId() != Router.NETWORK_ID) ) {
// this is pre-0.6.1.10, so it isn't going to happen any more
/*
if (remoteIdentHash != null) {
_context.shitlist().shitlistRouter(remoteIdentHash, "Sent us a peer from the wrong network");
@ -792,21 +792,9 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
PeerState peer = getPeerState(peerHash);
if (peer != null) {
RemoteHostId remote = peer.getRemoteHostId();
boolean added = false;
int droplistSize = 0;
synchronized (_dropList) {
if (!_dropList.contains(remote)) {
while (_dropList.size() > MAX_DROPLIST_SIZE)
_dropList.remove(0);
_dropList.add(remote);
added = true;
}
droplistSize = _dropList.size();
}
if (added) {
_context.statManager().addRateData("udp.dropPeerDroplist", droplistSize, 0);
SimpleScheduler.getInstance().addEvent(new RemoveDropList(remote), DROPLIST_PERIOD);
}
_dropList.add(remote);
_context.statManager().addRateData("udp.dropPeerDroplist", 1, 0);
SimpleScheduler.getInstance().addEvent(new RemoveDropList(remote), DROPLIST_PERIOD);
}
markUnreachable(peerHash);
_context.shitlist().shitlistRouter(peerHash, "Part of the wrong network, version = " + dsm.getRouterInfo().getOption("router.version"));
@ -838,13 +826,11 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
private RemoteHostId _peer;
public RemoveDropList(RemoteHostId peer) { _peer = peer; }
public void timeReached() {
synchronized (_dropList) {
_dropList.remove(_peer);
}
_dropList.remove(_peer);
}
}
boolean isInDropList(RemoteHostId peer) { synchronized (_dropList) { return _dropList.contains(peer); } }
boolean isInDropList(RemoteHostId peer) { return _dropList.contains(peer); }
void dropPeer(Hash peer, boolean shouldShitlist, String why) {
PeerState state = getPeerState(peer);
@ -916,16 +902,12 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
}
long now = _context.clock().now();
_context.statManager().addRateData("udp.droppedPeer", now - peer.getLastReceiveTime(), now - peer.getKeyEstablishedTime());
synchronized (_peersByIdent) {
altByIdent = _peersByIdent.remove(peer.getRemotePeer());
}
altByIdent = _peersByIdent.remove(peer.getRemotePeer());
}
RemoteHostId remoteId = peer.getRemoteHostId();
if (remoteId != null) {
synchronized (_peersByRemoteHost) {
altByHost = _peersByRemoteHost.remove(remoteId);
}
}
// unchoke 'em, but just because we'll never talk again...
@ -1087,10 +1069,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
// also introduce us, also bid aggressively so we are preferred over NTCP.
// (Otherwise we only talk UDP to those that are firewalled, and we will
// never get any introducers)
int count;
synchronized (_peersByIdent) {
count = _peersByIdent.size();
}
int count = _peersByIdent.size();
if (alwaysPreferUDP() || count < MIN_PEERS ||
(introducersRequired() && _introManager.introducerCount() < MIN_INTRODUCER_POOL))
return _cachedBid[SLOW_PREFERRED_BID];
@ -1474,9 +1453,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
@Override
public int countPeers() {
synchronized (_peersByIdent) {
return _peersByIdent.size();
}
}
@Override
@ -1484,7 +1461,6 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
long now = _context.clock().now();
int active = 0;
int inactive = 0;
synchronized (_peersByIdent) {
for (Iterator<PeerState> iter = _peersByIdent.values().iterator(); iter.hasNext(); ) {
PeerState peer = iter.next();
if (now-peer.getLastReceiveTime() > 5*60*1000)
@ -1492,7 +1468,6 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
else
active++;
}
}
return active;
}
@ -1501,7 +1476,6 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
long now = _context.clock().now();
int active = 0;
int inactive = 0;
synchronized (_peersByIdent) {
for (Iterator<PeerState> iter = _peersByIdent.values().iterator(); iter.hasNext(); ) {
PeerState peer = iter.next();
if (now-peer.getLastSendFullyTime() > 1*60*1000)
@ -1509,7 +1483,6 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
else
active++;
}
}
return active;
}
@ -1519,9 +1492,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
}
public boolean allowConnection() {
synchronized (_peersByIdent) {
return _peersByIdent.size() < getMaxConnections();
}
}
/**
@ -1534,9 +1505,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
Vector<Long> skews = new Vector();
Vector<PeerState> peers = new Vector();
synchronized (_peersByIdent) {
peers.addAll(_peersByIdent.values());
}
peers.addAll(_peersByIdent.values());
// If our clock is way off, we may not have many (or any) successful connections,
// so try hard in that case to return good data
@ -1557,15 +1526,13 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
/** **internal, do not use** */
public static final UDPTransport _instance() { return __instance; }
/** **internal, do not use** return the peers (Hash) of active peers. */
public List _getActivePeers() {
List peers = new ArrayList(128);
synchronized (_peersByIdent) {
peers.addAll(_peersByIdent.keySet());
}
public List<Hash> _getActivePeers() {
List<Hash> peers = new ArrayList(128);
peers.addAll(_peersByIdent.keySet());
long now = _context.clock().now();
for (Iterator iter = peers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
for (Iterator<Hash> iter = peers.iterator(); iter.hasNext(); ) {
Hash peer = iter.next();
PeerState state = getPeerState(peer);
if (now-state.getLastReceiveTime() > 5*60*1000)
iter.remove(); // don't include old peers
@ -1886,9 +1853,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
@Override
public void renderStatusHTML(Writer out, String urlBase, int sortFlags) throws IOException {
TreeSet<PeerState> peers = new TreeSet(getComparator(sortFlags));
synchronized (_peersByIdent) {
peers.addAll(_peersByIdent.values());
}
peers.addAll(_peersByIdent.values());
long offsetTotal = 0;
int bpsIn = 0;
@ -2205,12 +2170,12 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
}
private class ExpirePeerEvent implements SimpleTimer.TimedEvent {
private final List _expirePeers;
private List _expireBuffer;
private final Set<PeerState> _expirePeers;
private final List<PeerState> _expireBuffer;
private boolean _alive;
public ExpirePeerEvent() {
_expirePeers = new ArrayList(128);
_expireBuffer = new ArrayList(128);
_expirePeers = new ConcurrentHashSet(128);
_expireBuffer = new ArrayList();
}
public void timeReached() {
// Increase allowed idle time if we are well under allowed connections, otherwise decrease
@ -2222,10 +2187,9 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
long longInactivityCutoff = _context.clock().now() - EXPIRE_TIMEOUT;
long pingCutoff = _context.clock().now() - (2 * 60*60*1000);
_expireBuffer.clear();
synchronized (_expirePeers) {
int sz = _expirePeers.size();
for (int i = 0; i < sz; i++) {
PeerState peer = (PeerState)_expirePeers.get(i);
for (Iterator<PeerState> iter = _expirePeers.iterator(); iter.hasNext(); ) {
PeerState peer = iter.next();
long inactivityCutoff;
// if we offered to introduce them, or we used them as introducer in last 2 hours
if (peer.getWeRelayToThemAs() > 0 || peer.getIntroducerTime() > pingCutoff)
@ -2234,28 +2198,22 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
inactivityCutoff = shortInactivityCutoff;
if ( (peer.getLastReceiveTime() < inactivityCutoff) && (peer.getLastSendTime() < inactivityCutoff) ) {
_expireBuffer.add(peer);
_expirePeers.remove(i);
i--;
sz--;
iter.remove();
}
}
}
for (int i = 0; i < _expireBuffer.size(); i++)
dropPeer((PeerState)_expireBuffer.get(i), false, "idle too long");
dropPeer(_expireBuffer.get(i), false, "idle too long");
_expireBuffer.clear();
if (_alive)
SimpleTimer.getInstance().addEvent(ExpirePeerEvent.this, 30*1000);
}
public void add(PeerState peer) {
synchronized (_expirePeers) {
_expirePeers.add(peer);
}
}
public void remove(PeerState peer) {
synchronized (_expirePeers) {
_expirePeers.remove(peer);
}
}
public void setIsAlive(boolean isAlive) {
_alive = isAlive;
@ -2263,9 +2221,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
SimpleTimer.getInstance().addEvent(ExpirePeerEvent.this, 30*1000);
} else {
SimpleTimer.getInstance().removeEvent(ExpirePeerEvent.this);
synchronized (_expirePeers) {
_expirePeers.clear();
}
_expirePeers.clear();
}
}
}
@ -2348,10 +2304,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
}
PeerState pickTestPeer(RemoteHostId dontInclude) {
List<PeerState> peers = null;
synchronized (_peersByIdent) {
peers = new ArrayList(_peersByIdent.values());
}
List<PeerState> peers = new ArrayList(_peersByIdent.values());
Collections.shuffle(peers, _context.random());
for (int i = 0; i < peers.size(); i++) {
PeerState peer = peers.get(i);

View File

@ -101,7 +101,7 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
* @deprecated unused except by above
*/
private byte[][] preprocess(TunnelGateway.Pending msg) {
List fragments = new ArrayList(1);
List<byte[]> fragments = new ArrayList(1);
while (msg.getOffset() < msg.getData().length) {
fragments.add(preprocessFragment(msg));
@ -111,7 +111,7 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
byte rv[][] = new byte[fragments.size()][];
for (int i = 0; i < fragments.size(); i++)
rv[i] = (byte[])fragments.get(i);
rv[i] = fragments.get(i);
return rv;
}

View File

@ -386,7 +386,7 @@ public class TunnelDispatcher implements Service {
*
*/
public void dispatch(TunnelGatewayMessage msg) {
long before = System.currentTimeMillis();
long before = _context.clock().now();
TunnelGateway gw = _inboundGateways.get(msg.getTunnelId());
if (gw != null) {
if (_log.shouldLog(Log.DEBUG))
@ -423,7 +423,7 @@ public class TunnelDispatcher implements Service {
+ " existing = " + _inboundGateways.size(), new Exception("source"));
}
long dispatchTime = System.currentTimeMillis() - before;
long dispatchTime = _context.clock().now() - before;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Dispatch in gw time: " + dispatchTime + " gateway? " + gw);
@ -726,6 +726,7 @@ public class TunnelDispatcher implements Service {
startup();
}
/** @deprecated moved to router console */
public void renderStatusHTML(Writer out) throws IOException {}
private class LeaveTunnel extends JobImpl {

View File

@ -2,6 +2,8 @@ package net.i2p.router.tunnel;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import net.i2p.router.RouterContext;
import net.i2p.util.I2PThread;
@ -12,27 +14,36 @@ import net.i2p.util.I2PThread;
*/
public class TunnelGatewayPumper implements Runnable {
private RouterContext _context;
private final List<PumpedTunnelGateway> _wantsPumping;
private final BlockingQueue<PumpedTunnelGateway> _wantsPumping;
private boolean _stop;
private static final int PUMPERS = 4;
/** Creates a new instance of TunnelGatewayPumper */
public TunnelGatewayPumper(RouterContext ctx) {
_context = ctx;
_wantsPumping = new ArrayList(64);
_wantsPumping = new LinkedBlockingQueue();
_stop = false;
for (int i = 0; i < 4; i++)
new I2PThread(this, "GW pumper " + i, true).start();
for (int i = 0; i < PUMPERS; i++)
new I2PThread(this, "Tunnel GW pumper " + i + '/' + PUMPERS, true).start();
}
public void stopPumping() {
_stop=true;
synchronized (_wantsPumping) { _wantsPumping.notifyAll(); }
_wantsPumping.clear();
PumpedTunnelGateway poison = new PoisonPTG(_context);
for (int i = 0; i < PUMPERS; i++)
_wantsPumping.offer(poison);
for (int i = 1; i <= 5 && !_wantsPumping.isEmpty(); i++) {
try {
Thread.sleep(i * 50);
} catch (InterruptedException ie) {}
}
_wantsPumping.clear();
}
public void wantsPumping(PumpedTunnelGateway gw) {
synchronized (_wantsPumping) {
_wantsPumping.add(gw);
_wantsPumping.notify();
}
if (!_stop)
_wantsPumping.offer(gw);
}
public void run() {
@ -40,17 +51,25 @@ public class TunnelGatewayPumper implements Runnable {
List<TunnelGateway.Pending> queueBuf = new ArrayList(32);
while (!_stop) {
try {
synchronized (_wantsPumping) {
if (_wantsPumping.size() > 0)
gw = _wantsPumping.remove(0);
else
_wantsPumping.wait();
}
gw = _wantsPumping.take();
} catch (InterruptedException ie) {}
if (gw != null) {
if (gw.getMessagesSent() == POISON_PTG)
break;
gw.pump(queueBuf);
gw = null;
}
}
}
private static final int POISON_PTG = -99999;
private static class PoisonPTG extends PumpedTunnelGateway {
public PoisonPTG(RouterContext ctx) {
super(ctx, null, null, null, null);
}
@Override
public int getMessagesSent() { return POISON_PTG; }
}
}
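As a design note, stopPumping() above offers one poison gateway per pumper because each of the PUMPERS threads blocked in take() consumes exactly one sentinel. A generic multi-consumer sketch of that idea (not the project's code):

// Illustration only: stopping N consumers of one queue with N poison entries.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

class MultiPumperShutdownSketch {
    private static final int PUMPERS = 4;
    private static final Runnable POISON = new Runnable() { public void run() {} };
    private final BlockingQueue<Runnable> work = new LinkedBlockingQueue<Runnable>();

    void start() {
        for (int i = 0; i < PUMPERS; i++) {
            new Thread(new Runnable() {
                public void run() {
                    while (true) {
                        Runnable job;
                        try {
                            job = work.take();
                        } catch (InterruptedException ie) {
                            continue;
                        }
                        if (job == POISON)
                            break;             // each pumper consumes exactly one poison
                        job.run();
                    }
                }
            }, "pumper-" + i).start();
        }
    }

    void stop() {
        for (int i = 0; i < PUMPERS; i++)
            work.offer(POISON);                // one sentinel per consumer thread
    }
}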

View File

@ -176,7 +176,7 @@ class BuildRequestor {
private static final String MIN_VARIABLE_VERSION = "0.7.12";
/** change this to true in 0.7.13 if testing goes well */
private static final boolean SEND_VARIABLE = false;
private static final boolean SEND_VARIABLE = true;
/** 5 (~2600 bytes) fits nicely in 3 tunnel messages */
private static final int SHORT_RECORDS = 5;
private static final int LONG_RECORDS = TunnelBuildMessage.MAX_RECORD_COUNT;

View File

@ -180,7 +180,8 @@ public abstract class TunnelPeerSelector {
peers.addAll(ctx.tunnelManager().selectPeersInTooManyTunnels());
// if (false && filterUnreachable(ctx, isInbound, isExploratory)) {
if (filterUnreachable(ctx, isInbound, isExploratory)) {
List caps = ctx.peerManager().getPeersByCapability(Router.CAPABILITY_UNREACHABLE);
// This is the only use for getPeersByCapability? And the whole set of datastructures in PeerManager?
List<Hash> caps = ctx.peerManager().getPeersByCapability(Router.CAPABILITY_UNREACHABLE);
if (caps != null)
peers.addAll(caps);
caps = ctx.profileOrganizer().selectPeersLocallyUnreachable();