merge of '0d58ec9e2b160029e92a584b0c707ffdf4f25c7e' and 'd4e270a4df0c8134d7bac1585e30ef8ddef37f85'
zzz
2009-11-04 16:06:46 +00:00
25 changed files with 320 additions and 171 deletions

View File

@@ -34,7 +34,7 @@ class RouterThrottleImpl implements RouterThrottle {
private static final int DEFAULT_MAX_TUNNELS = 2500;
private static final String PROP_DEFAULT_KBPS_THROTTLE = "router.defaultKBpsThrottle";
private static final String PROP_MAX_PROCESSINGTIME = "router.defaultProcessingTimeThrottle";
private static final int DEFAULT_MAX_PROCESSINGTIME = 1500;
private static final int DEFAULT_MAX_PROCESSINGTIME = 1250;
/** tunnel acceptance */
public static final int TUNNEL_ACCEPT = 0;
@@ -137,7 +137,7 @@ class RouterThrottleImpl implements RouterThrottle {
_log.warn("Refusing tunnel request due to sendProcessingTime of " + avgSendProcessingTime
+ " ms over the last two minutes, which is too much.");
}
setTunnelStatus("Rejecting tunnels: congestion");
setTunnelStatus("Rejecting tunnels: High message delay");
return TunnelHistory.TUNNEL_REJECT_BANDWIDTH;
}
}

View File

@@ -659,7 +659,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
String validate(Hash key, RouterInfo routerInfo) throws IllegalArgumentException {
long now = _context.clock().now();
boolean upLongEnough = _context.router().getUptime() > 60*60*1000;
// Once we're over 150 routers, reduce the expiration time down from the default,
// Once we're over 120 routers, reduce the expiration time down from the default,
// as a crude way of limiting memory usage.
// i.e. at 300 routers the expiration time will be about half the default, etc.
// And if we're floodfill, we can keep the expiration really short, since
@@ -673,7 +673,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
// _kb.size() includes leasesets but that's ok
adjustedExpiration = Math.min(ROUTER_INFO_EXPIRATION,
ROUTER_INFO_EXPIRATION_MIN +
((ROUTER_INFO_EXPIRATION - ROUTER_INFO_EXPIRATION_MIN) * 150 / (_kb.size() + 1)));
((ROUTER_INFO_EXPIRATION - ROUTER_INFO_EXPIRATION_MIN) * 120 / (_kb.size() + 1)));
if (!key.equals(routerInfo.getIdentity().getHash())) {
if (_log.shouldLog(Log.WARN))
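The unchanged comment above still says the expiration is about half the default at 300 routers, which matched the old factor of 150; with the new factor of 120 it works out to roughly 40% of the way from the minimum back toward the default at that size. A minimal sketch of the arithmetic, using hypothetical values for the two expiration constants (the real ROUTER_INFO_EXPIRATION values are not shown in this hunk):

    // Hypothetical constants, for illustration only.
    long ROUTER_INFO_EXPIRATION     = 60*60*1000L;   // assumed default: 60 minutes
    long ROUTER_INFO_EXPIRATION_MIN = 10*60*1000L;   // assumed floor: 10 minutes
    int kbSize = 300;                                // entries in the local netDb
    long adjusted = Math.min(ROUTER_INFO_EXPIRATION,
            ROUTER_INFO_EXPIRATION_MIN +
            ((ROUTER_INFO_EXPIRATION - ROUTER_INFO_EXPIRATION_MIN) * 120 / (kbSize + 1)));
    // 50 minutes * 120 / 301 is about 20 minutes, so adjusted is about 30 minutes here.
    // At roughly 120 or fewer entries the scaled term reaches the full range and
    // Math.min() clamps the result back to ROUTER_INFO_EXPIRATION, i.e. no reduction.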
@@ -891,12 +891,13 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
public int getPeerTimeout(Hash peer) {
PeerProfile prof = _context.profileOrganizer().getProfile(peer);
double responseTime = MAX_PER_PEER_TIMEOUT;
if (prof != null)
if (prof != null && prof.getIsExpandedDB()) {
responseTime = prof.getDbResponseTime().getLifetimeAverageValue();
if (responseTime < MIN_PER_PEER_TIMEOUT)
responseTime = MIN_PER_PEER_TIMEOUT;
else if (responseTime > MAX_PER_PEER_TIMEOUT)
responseTime = MAX_PER_PEER_TIMEOUT;
if (responseTime < MIN_PER_PEER_TIMEOUT)
responseTime = MIN_PER_PEER_TIMEOUT;
else if (responseTime > MAX_PER_PEER_TIMEOUT)
responseTime = MAX_PER_PEER_TIMEOUT;
}
return 4 * (int)responseTime; // give it up to 4x the average response time
}
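With the added getIsExpandedDB() guard, a peer whose profile has never recorded a db response keeps the default responseTime of MAX_PER_PEER_TIMEOUT and therefore receives the most generous timeout. A rough sketch of the resulting values, using assumed bounds (the real MIN/MAX_PER_PEER_TIMEOUT constants are not shown in this hunk):

    // Assumed bounds, for illustration only.
    int MIN_PER_PEER_TIMEOUT = 2*1000;   // 2 seconds
    int MAX_PER_PEER_TIMEOUT = 5*1000;   // 5 seconds
    // lifetime avg dbResponseTime  800 ms -> clamped up to 2000 ms -> 4 * 2000 =  8 s timeout
    // lifetime avg dbResponseTime 3500 ms -> within bounds         -> 4 * 3500 = 14 s timeout
    // no expanded DB stats at all         -> stays at 5000 ms      -> 4 * 5000 = 20 s timeout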

View File

@@ -174,11 +174,13 @@ class StoreJob extends JobImpl {
_state.addSkipped(peer);
} else {
int peerTimeout = _facade.getPeerTimeout(peer);
PeerProfile prof = getContext().profileOrganizer().getProfile(peer);
if (prof != null) {
RateStat failing = prof.getDBHistory().getFailedLookupRate();
Rate failed = failing.getRate(60*60*1000);
}
//PeerProfile prof = getContext().profileOrganizer().getProfile(peer);
//if (prof != null && prof.getIsExpandedDB()) {
// RateStat failing = prof.getDBHistory().getFailedLookupRate();
// Rate failed = failing.getRate(60*60*1000);
//}
//long failedCount = failed.getCurrentEventCount()+failed.getLastEventCount();
//if (failedCount > 10) {
// _state.addSkipped(peer);

View File

@@ -19,12 +19,15 @@ public class IntegrationCalculator extends Calculator {
@Override
public double calc(PeerProfile profile) {
// give more weight to recent counts
long val = profile.getDbIntroduction().getRate(24*60*60*1000l).getCurrentEventCount();
val += 2 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getLastEventCount();
val += 3 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getCurrentEventCount();
val += 4 * 24 * profile.getDbIntroduction().getRate(60*60*1000l).getCurrentEventCount();
val /= 10;
long val = 0;
if (profile.getIsExpandedDB()) {
// give more weight to recent counts
val = profile.getDbIntroduction().getRate(24*60*60*1000l).getCurrentEventCount();
val += 2 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getLastEventCount();
val += 3 * 4 * profile.getDbIntroduction().getRate(6*60*60*1000l).getCurrentEventCount();
val += 4 * 24 * profile.getDbIntroduction().getRate(60*60*1000l).getCurrentEventCount();
val /= 10;
}
val += profile.getIntegrationBonus();
return val;
}
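The multipliers appear to scale each period to a rough per-day count (four 6-hour periods, twenty-four 1-hour periods) with weights 1 through 4 favouring the most recent activity, and the final division by 10 averages over the sum of those weights. A worked example with made-up event counts:

    // Made-up counts, for illustration: 40 introductions over the last day,
    // 12 in the previous six hours, 15 in the current six hours, 3 in the last hour.
    long val = 40;          //  40: 1-day count, weight 1
    val += 2 * 4 * 12;      //  96: previous 6-hour count scaled to a day, weight 2
    val += 3 * 4 * 15;      // 180: current 6-hour count scaled to a day, weight 3
    val += 4 * 24 * 3;      // 288: current 1-hour count scaled to a day, weight 4
    val /= 10;              // (40 + 96 + 180 + 288) / 10 == 60 with integer division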

View File

@@ -36,8 +36,8 @@ public class PeerProfile {
private long _lastHeardFrom;
private double _tunnelTestResponseTimeAvg;
// periodic rates
private RateStat _sendSuccessSize = null;
private RateStat _receiveSize = null;
//private RateStat _sendSuccessSize = null;
//private RateStat _receiveSize = null;
private RateStat _dbResponseTime = null;
private RateStat _tunnelCreateResponseTime = null;
private RateStat _tunnelTestResponseTime = null;
@@ -56,6 +56,7 @@ public class PeerProfile {
private DBHistory _dbHistory;
// does this peer profile contain expanded data, or just the basics?
private boolean _expanded;
private boolean _expandedDB;
private int _consecutiveShitlists;
public PeerProfile(RouterContext context, Hash peer) {
@@ -72,6 +73,8 @@ public class PeerProfile {
_consecutiveShitlists = 0;
_tunnelTestResponseTimeAvg = 0.0d;
_peer = peer;
// this is always true, and there are several places in the router that will NPE
// if it is false, so all need to be fixed before we can have non-expanded profiles
if (expand)
expandProfile();
}
@@ -87,6 +90,7 @@ public class PeerProfile {
*
*/
public boolean getIsExpanded() { return _expanded; }
public boolean getIsExpandedDB() { return _expandedDB; }
public int incrementShitlists() { return _consecutiveShitlists++; }
public void unshitlist() { _consecutiveShitlists = 0; }
@@ -107,18 +111,25 @@ public class PeerProfile {
*
* Note: this appears to be the only use for these two RateStats.
*
* Update: Rewritten so we can get rid of the two RateStats.
* This also helps by not having it depend on coalesce boundaries.
*
* @param period must be one of the periods in the RateStat constructors below
* (5*60*1000 or 60*60*1000)
*/
public boolean getIsActive(long period) {
if ( (getSendSuccessSize().getRate(period).getCurrentEventCount() > 0) ||
(getSendSuccessSize().getRate(period).getLastEventCount() > 0) ||
(getReceiveSize().getRate(period).getCurrentEventCount() > 0) ||
(getReceiveSize().getRate(period).getLastEventCount() > 0) ||
_context.commSystem().isEstablished(_peer) )
return true;
else
return false;
//if ( (getSendSuccessSize().getRate(period).getCurrentEventCount() > 0) ||
// (getSendSuccessSize().getRate(period).getLastEventCount() > 0) ||
// (getReceiveSize().getRate(period).getCurrentEventCount() > 0) ||
// (getReceiveSize().getRate(period).getLastEventCount() > 0) ||
// _context.commSystem().isEstablished(_peer) )
// return true;
//else
// return false;
long before = _context.clock().now() - period;
return getLastHeardFrom() > before ||
getLastSendSuccessful() > before ||
_context.commSystem().isEstablished(_peer);
}
@@ -142,25 +153,31 @@ public class PeerProfile {
public long getLastHeardFrom() { return _lastHeardFrom; }
public void setLastHeardFrom(long when) { _lastHeardFrom = when; }
/** history of tunnel activity with the peer */
/** history of tunnel activity with the peer
Warning - may return null if !getIsExpanded() */
public TunnelHistory getTunnelHistory() { return _tunnelHistory; }
public void setTunnelHistory(TunnelHistory history) { _tunnelHistory = history; }
/** history of db activity with the peer */
/** history of db activity with the peer
Warning - may return null if !getIsExpandedDB() */
public DBHistory getDBHistory() { return _dbHistory; }
public void setDBHistory(DBHistory hist) { _dbHistory = hist; }
/** how large successfully sent messages are, calculated over a 1 minute, 1 hour, and 1 day period */
public RateStat getSendSuccessSize() { return _sendSuccessSize; }
//public RateStat getSendSuccessSize() { return _sendSuccessSize; }
/** how large received messages are, calculated over a 1 minute, 1 hour, and 1 day period */
public RateStat getReceiveSize() { return _receiveSize; }
/** how long it takes to get a db response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period */
//public RateStat getReceiveSize() { return _receiveSize; }
/** how long it takes to get a db response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period
Warning - may return null if !getIsExpandedDB() */
public RateStat getDbResponseTime() { return _dbResponseTime; }
/** how long it takes to get a tunnel create response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period */
/** how long it takes to get a tunnel create response from the peer (in milliseconds), calculated over a 1 minute, 1 hour, and 1 day period
Warning - may return null if !getIsExpanded() */
public RateStat getTunnelCreateResponseTime() { return _tunnelCreateResponseTime; }
/** how long it takes to successfully test a tunnel this peer participates in (in milliseconds), calculated over a 10 minute, 1 hour, and 1 day period */
/** how long it takes to successfully test a tunnel this peer participates in (in milliseconds), calculated over a 10 minute, 1 hour, and 1 day period
Warning - may return null if !getIsExpanded() */
public RateStat getTunnelTestResponseTime() { return _tunnelTestResponseTime; }
/** how many new peers we get from dbSearchReplyMessages or dbStore messages, calculated over a 1 hour, 1 day, and 1 week period */
/** how many new peers we get from dbSearchReplyMessages or dbStore messages, calculated over a 1 hour, 1 day, and 1 week period
Warning - may return null if !getIsExpandedDB() */
public RateStat getDbIntroduction() { return _dbIntroduction; }
/**
@@ -327,10 +344,12 @@ public class PeerProfile {
* extensive stats on them, call this to discard excess data points. Specifically,
* this drops the rates, the tunnelHistory, and the dbHistory.
*
* UNUSED for now, will cause NPEs elsewhere
*/
/*****
public void shrinkProfile() {
_sendSuccessSize = null;
_receiveSize = null;
//_sendSuccessSize = null;
//_receiveSize = null;
_dbResponseTime = null;
_tunnelCreateResponseTime = null;
_tunnelTestResponseTime = null;
@@ -339,7 +358,9 @@ public class PeerProfile {
_dbHistory = null;
_expanded = false;
_expandedDB = false;
}
******/
/**
* When the given peer is performing well enough that we want to keep detailed
@@ -350,32 +371,43 @@ public class PeerProfile {
*/
public void expandProfile() {
String group = (null == _peer ? "profileUnknown" : _peer.toBase64().substring(0,6));
if (_sendSuccessSize == null)
_sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 5*60*1000l, 60*60*1000l });
if (_receiveSize == null)
_receiveSize = new RateStat("receiveSize", "How large received messages are", group, new long[] { 5*60*1000l, 60*60*1000l } );
if (_dbResponseTime == null)
_dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
//if (_sendSuccessSize == null)
// _sendSuccessSize = new RateStat("sendSuccessSize", "How large successfully sent messages are", group, new long[] { 5*60*1000l, 60*60*1000l });
//if (_receiveSize == null)
// _receiveSize = new RateStat("receiveSize", "How large received messages are", group, new long[] { 5*60*1000l, 60*60*1000l } );
if (_tunnelCreateResponseTime == null)
_tunnelCreateResponseTime = new RateStat("tunnelCreateResponseTime", "how long it takes to get a tunnel create response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_tunnelTestResponseTime == null)
_tunnelTestResponseTime = new RateStat("tunnelTestResponseTime", "how long it takes to successfully test a tunnel this peer participates in (in milliseconds)", group, new long[] { 10*60*1000l, 30*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000 } );
if (_dbIntroduction == null)
_dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 6*60*60*1000l, 24*60*60*1000l });
if (_tunnelHistory == null)
_tunnelHistory = new TunnelHistory(_context, group);
//_sendSuccessSize.setStatLog(_context.statManager().getStatLog());
//_receiveSize.setStatLog(_context.statManager().getStatLog());
_tunnelCreateResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelTestResponseTime.setStatLog(_context.statManager().getStatLog());
_expanded = true;
}
/**
* For floodfills
*/
public synchronized void expandDBProfile() {
String group = (null == _peer ? "profileUnknown" : _peer.toBase64().substring(0,6));
if (_dbResponseTime == null)
_dbResponseTime = new RateStat("dbResponseTime", "how long it takes to get a db response from the peer (in milliseconds)", group, new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000 } );
if (_dbIntroduction == null)
_dbIntroduction = new RateStat("dbIntroduction", "how many new peers we get from dbSearchReplyMessages or dbStore messages", group, new long[] { 60*60*1000l, 6*60*60*1000l, 24*60*60*1000l });
if (_dbHistory == null)
_dbHistory = new DBHistory(_context, group);
_sendSuccessSize.setStatLog(_context.statManager().getStatLog());
_receiveSize.setStatLog(_context.statManager().getStatLog());
_dbResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelCreateResponseTime.setStatLog(_context.statManager().getStatLog());
_tunnelTestResponseTime.setStatLog(_context.statManager().getStatLog());
_dbIntroduction.setStatLog(_context.statManager().getStatLog());
_expanded = true;
_expandedDB = true;
}
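The profile now has two expansion levels: expandProfile() still creates the tunnel and transport RateStats for every profile, while the db RateStats and DBHistory are only created on demand by expandDBProfile() when a netDb interaction is actually recorded. A minimal guard sketch of the pattern, matching what the ProfileManagerImpl methods below do:

    // Sketch of the caller-side pattern; see ProfileManagerImpl below for the real code.
    PeerProfile data = getProfile(peer);
    if (data != null) {
        if (!data.getIsExpandedDB())
            data.expandDBProfile();          // lazily create db RateStats + DBHistory
        // safe now: these would be null (and NPE) without the expansion above
        data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
        data.getDBHistory().lookupSuccessful();
    }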
/** once a day, on average, cut the measured throughput values in half */
/** let's try once an hour times 3/4 */
private static final int DROP_PERIOD_MINUTES = 60;
@@ -419,14 +451,16 @@ public class PeerProfile {
/** update the stats and rates (this should be called once a minute) */
public void coalesceStats() {
if (!_expanded) return;
_dbIntroduction.coalesceStats();
_dbResponseTime.coalesceStats();
_receiveSize.coalesceStats();
_sendSuccessSize.coalesceStats();
//_receiveSize.coalesceStats();
//_sendSuccessSize.coalesceStats();
_tunnelCreateResponseTime.coalesceStats();
_tunnelTestResponseTime.coalesceStats();
_dbHistory.coalesceStats();
_tunnelHistory.coalesceStats();
if (_expandedDB) {
_dbIntroduction.coalesceStats();
_dbResponseTime.coalesceStats();
_dbHistory.coalesceStats();
}
coalesceThroughput();

View File

@@ -39,7 +39,7 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastSendSuccessful(_context.clock().now());
data.getSendSuccessSize().addData(bytesSent, msToSend);
//data.getSendSuccessSize().addData(bytesSent, msToSend);
}
/**
@@ -169,11 +169,14 @@ public class ProfileManagerImpl implements ProfileManager {
/**
* Note that the peer was able to return the valid data for a db lookup
*
* This will force creation of DB stats
*/
public void dbLookupSuccessful(Hash peer, long responseTimeMs) {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastHeardFrom(_context.clock().now());
if (!data.getIsExpandedDB())
data.expandDBProfile();
data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
DBHistory hist = data.getDBHistory();
hist.lookupSuccessful();
@@ -183,10 +186,13 @@ public class ProfileManagerImpl implements ProfileManager {
* Note that the peer was unable to reply to a db lookup - either with data or with
* a lookupReply redirecting the user elsewhere
*
* This will force creation of DB stats
*/
public void dbLookupFailed(Hash peer) {
PeerProfile data = getProfile(peer);
if (data == null) return;
if (!data.getIsExpandedDB())
data.expandDBProfile();
DBHistory hist = data.getDBHistory();
hist.lookupFailed();
}
@@ -203,6 +209,8 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastHeardFrom(_context.clock().now());
if (!data.getIsExpandedDB())
return;
data.getDbResponseTime().addData(responseTimeMs, responseTimeMs);
data.getDbIntroduction().addData(newPeers, responseTimeMs);
DBHistory hist = data.getDBHistory();
@@ -217,6 +225,8 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastHeardFrom(_context.clock().now());
if (!data.getIsExpandedDB())
return;
DBHistory hist = data.getDBHistory();
hist.lookupReceived();
}
@@ -229,6 +239,8 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastHeardFrom(_context.clock().now());
if (!data.getIsExpandedDB())
return;
DBHistory hist = data.getDBHistory();
hist.unpromptedStoreReceived(wasNewKey);
}
@@ -242,8 +254,10 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
long now = _context.clock().now();
data.setLastSendSuccessful(now);
data.setLastHeardFrom(now);
if (!data.getIsExpandedDB())
return;
data.setLastSendSuccessful(now);
// we could do things like update some sort of "how many successful stores we've sent them"...
// naah.. dont really care now
}
@@ -279,7 +293,7 @@ public class ProfileManagerImpl implements ProfileManager {
PeerProfile data = getProfile(peer);
if (data == null) return;
data.setLastHeardFrom(_context.clock().now());
data.getReceiveSize().addData(bytesRead, msToReceive);
//data.getReceiveSize().addData(bytesRead, msToReceive);
}
private PeerProfile getProfile(Hash peer) {

View File

@@ -241,7 +241,7 @@ public class ProfileOrganizer {
*/
public boolean peerSendsBadReplies(Hash peer) {
PeerProfile profile = getProfile(peer);
if (profile != null) {
if (profile != null && profile.getIsExpandedDB()) {
RateStat invalidReplyRateStat = profile.getDBHistory().getInvalidReplyRate();
Rate invalidReplyRate = invalidReplyRateStat.getRate(30*60*1000l);
if ( (invalidReplyRate.getCurrentTotalValue() > MAX_BAD_REPLIES_PER_HOUR) ||

View File

@@ -128,18 +128,20 @@ class ProfilePersistenceHelper {
out.write(buf.toString().getBytes());
profile.getTunnelHistory().store(out);
profile.getDBHistory().store(out);
if (profile.getIsExpanded()) {
// only write out expanded data if, uh, we've got it
profile.getDbIntroduction().store(out, "dbIntroduction");
profile.getDbResponseTime().store(out, "dbResponseTime");
profile.getReceiveSize().store(out, "receiveSize");
profile.getSendSuccessSize().store(out, "sendSuccessSize");
profile.getTunnelHistory().store(out);
//profile.getReceiveSize().store(out, "receiveSize");
//profile.getSendSuccessSize().store(out, "sendSuccessSize");
profile.getTunnelCreateResponseTime().store(out, "tunnelCreateResponseTime");
profile.getTunnelTestResponseTime().store(out, "tunnelTestResponseTime");
}
if (profile.getIsExpandedDB()) {
profile.getDBHistory().store(out);
profile.getDbIntroduction().store(out, "dbIntroduction");
profile.getDbResponseTime().store(out, "dbResponseTime");
}
}
public Set readProfiles() {
@@ -211,12 +213,22 @@ class ProfilePersistenceHelper {
profile.setPeakTunnel1mThroughputKBps(getDouble(props, "tunnelPeakTunnel1mThroughput"));
profile.getTunnelHistory().load(props);
profile.getDBHistory().load(props);
profile.getDbIntroduction().load(props, "dbIntroduction", true);
profile.getDbResponseTime().load(props, "dbResponseTime", true);
profile.getReceiveSize().load(props, "receiveSize", true);
profile.getSendSuccessSize().load(props, "sendSuccessSize", true);
// In the interest of keeping the in-memory profiles small,
// don't load the DB info at all unless there is something interesting there
// (i.e. floodfills)
// It seems like we do one or two lookups as a part of handshaking?
// Not sure, to be researched.
if (getLong(props, "dbHistory.successfulLookups") > 1 ||
getLong(props, "dbHistory.failedlLokups") > 1) {
profile.expandDBProfile();
profile.getDBHistory().load(props);
profile.getDbIntroduction().load(props, "dbIntroduction", true);
profile.getDbResponseTime().load(props, "dbResponseTime", true);
}
//profile.getReceiveSize().load(props, "receiveSize", true);
//profile.getSendSuccessSize().load(props, "sendSuccessSize", true);
profile.getTunnelCreateResponseTime().load(props, "tunnelCreateResponseTime", true);
profile.getTunnelTestResponseTime().load(props, "tunnelTestResponseTime", true);

View File

@@ -636,25 +636,20 @@ public class UPnP extends ControlPoint implements DeviceChangeListener, EventLis
}
public void run() {
HashMap<ForwardPort, ForwardPortStatus> map = new HashMap(1);
for(ForwardPort port : portsToForwardNow) {
String proto = protoToString(port.protocol);
map.clear();
ForwardPortStatus fps;
if (proto.length() <= 1) {
HashMap<ForwardPort, ForwardPortStatus> map = new HashMap<ForwardPort, ForwardPortStatus>();
map.put(port, new ForwardPortStatus(ForwardPortStatus.DEFINITE_FAILURE, "Protocol not supported", port.portNumber));
forwardCallback.portForwardStatus(map);
continue;
}
if(tryAddMapping(proto, port.portNumber, port.name, port)) {
HashMap<ForwardPort, ForwardPortStatus> map = new HashMap<ForwardPort, ForwardPortStatus>();
map.put(port, new ForwardPortStatus(ForwardPortStatus.MAYBE_SUCCESS, "Port apparently forwarded by UPnP", port.portNumber));
forwardCallback.portForwardStatus(map);
continue;
fps = new ForwardPortStatus(ForwardPortStatus.DEFINITE_FAILURE, "Protocol not supported", port.portNumber);
} else if(tryAddMapping(proto, port.portNumber, port.name, port)) {
fps = new ForwardPortStatus(ForwardPortStatus.MAYBE_SUCCESS, "Port apparently forwarded by UPnP", port.portNumber);
} else {
HashMap<ForwardPort, ForwardPortStatus> map = new HashMap<ForwardPort, ForwardPortStatus>();
map.put(port, new ForwardPortStatus(ForwardPortStatus.PROBABLE_FAILURE, "UPnP port forwarding apparently failed", port.portNumber));
forwardCallback.portForwardStatus(map);
continue;
fps = new ForwardPortStatus(ForwardPortStatus.PROBABLE_FAILURE, "UPnP port forwarding apparently failed", port.portNumber);
}
map.put(port, fps);
forwardCallback.portForwardStatus(map);
}
}
}

View File

@@ -22,7 +22,11 @@ public class UDPEndpoint {
private DatagramSocket _socket;
private InetAddress _bindAddress;
public UDPEndpoint(RouterContext ctx, UDPTransport transport, int listenPort, InetAddress bindAddress) throws SocketException {
/**
* @param listenPort -1 or the requested port, may not be honored
* @param bindAddress null ok
*/
public UDPEndpoint(RouterContext ctx, UDPTransport transport, int listenPort, InetAddress bindAddress) {
_context = ctx;
_log = ctx.logManager().getLog(UDPEndpoint.class);
_transport = transport;
@@ -30,23 +34,20 @@ public class UDPEndpoint {
_listenPort = listenPort;
}
/** caller should call getListenPort() after this to get the actual bound port and determine success */
public void startup() {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Starting up the UDP endpoint");
shutdown();
try {
if (_bindAddress == null)
_socket = new DatagramSocket(_listenPort);
else
_socket = new DatagramSocket(_listenPort, _bindAddress);
_sender = new UDPSender(_context, _socket, "UDPSender");
_receiver = new UDPReceiver(_context, _transport, _socket, "UDPReceiver");
_sender.startup();
_receiver.startup();
} catch (SocketException se) {
_transport.setReachabilityStatus(CommSystemFacade.STATUS_HOSED);
_log.log(Log.CRIT, "Unable to bind on port " + _listenPort, se);
_socket = getSocket();
if (_socket == null) {
_log.log(Log.CRIT, "UDP Unable to open a port");
return;
}
_sender = new UDPSender(_context, _socket, "UDPSender");
_receiver = new UDPReceiver(_context, _transport, _socket, "UDPReceiver");
_sender.startup();
_receiver.startup();
}
public void shutdown() {
@@ -60,6 +61,8 @@ public class UDPEndpoint {
}
public void setListenPort(int newPort) { _listenPort = newPort; }
/*******
public void updateListenPort(int newPort) {
if (newPort == _listenPort) return;
try {
@@ -76,7 +79,54 @@ public class UDPEndpoint {
_log.error("Unable to bind on " + _listenPort);
}
}
********/
/** 8998 is monotone, and 32000 is the wrapper, so let's stay between those */
private static final int MIN_RANDOM_PORT = 9111;
private static final int MAX_RANDOM_PORT = 31777;
private static final int MAX_PORT_RETRIES = 20;
/**
* Open socket using requested port in _listenPort and bind host in _bindAddress.
* If _listenPort <= 0, or requested port is busy, repeatedly try a new random port.
* @return null on failure
* Sets _listenPort to actual port or -1 on failure
*/
private DatagramSocket getSocket() {
DatagramSocket socket = null;
int port = _listenPort;
for (int i = 0; i < MAX_PORT_RETRIES; i++) {
if (port <= 0) {
// try random ports rather than just do new DatagramSocket()
// so we stay out of the way of other I2P stuff
port = MIN_RANDOM_PORT + _context.random().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
}
try {
if (_bindAddress == null)
socket = new DatagramSocket(port);
else
socket = new DatagramSocket(port, _bindAddress);
break;
} catch (SocketException se) {
if (_log.shouldLog(Log.WARN))
_log.warn("Binding to port " + port + " failed: " + se);
}
port = -1;
}
if (socket == null) {
_log.log(Log.CRIT, "SSU Unable to bind to a port on " + _bindAddress);
} else if (port != _listenPort) {
if (_listenPort > 0)
_log.error("SSU Unable to bind to requested port " + _listenPort + ", using random port " + port);
else
_log.error("SSU selected random port " + port);
}
_listenPort = port;
return socket;
}
/** call after startup() to get actual port or -1 on startup failure */
public int getListenPort() { return _listenPort; }
public UDPSender getSender() { return _sender; }
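Since startup() no longer throws SocketException, the caller must check afterwards whether a port was actually bound. A minimal caller sketch of the new contract, mirroring what UDPTransport.startup() does in the next file (the variable names here are illustrative):

    UDPEndpoint endpoint = new UDPEndpoint(ctx, transport, requestedPort, null); // null = wildcard bind
    endpoint.startup();                        // tries requestedPort, then random ports (up to 20 attempts)
    int boundPort = endpoint.getListenPort();  // actual bound port, or -1 on failure
    if (boundPort <= 0) {
        // binding failed even after the random-port retries; mark SSU unusable
    }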

View File

@@ -100,6 +100,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
public static final String STYLE = "SSU";
public static final String PROP_INTERNAL_PORT = "i2np.udp.internalPort";
/** now unused, we pick a random port */
public static final int DEFAULT_INTERNAL_PORT = 8887;
/** since fixed port defaults to true, this doesn't do anything at the moment.
* We should have an exception if it matches the existing low port. */
@@ -137,6 +138,7 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
public static final String PROP_FORCE_INTRODUCERS = "i2np.udp.forceIntroducers";
/** do we allow direct SSU connections, sans introducers? */
public static final String PROP_ALLOW_DIRECT = "i2np.udp.allowDirect";
/** this is rarely if ever used, default is to bind to wildcard address */
public static final String PROP_BIND_INTERFACE = "i2np.udp.bindInterface";
/** how many relays offered to us will we use at a time? */
@@ -226,40 +228,41 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
System.arraycopy(_context.routerHash().getData(), 0, _introKey.getData(), 0, SessionKey.KEYSIZE_BYTES);
rebuildExternalAddress();
int port = -1;
if (_externalListenPort <= 0) {
// no explicit external port, so lets try an internal one
port = _context.getProperty(PROP_INTERNAL_PORT, DEFAULT_INTERNAL_PORT);
// attempt to use it as our external port - this will be overridden by
// externalAddressReceived(...)
_context.router().setConfigSetting(PROP_EXTERNAL_PORT, port+"");
_context.router().saveConfig();
} else {
port = _externalListenPort;
if (_log.shouldLog(Log.INFO))
_log.info("Binding to the explicitly specified external port: " + port);
}
if (_endpoint == null) {
String bindTo = _context.getProperty(PROP_BIND_INTERFACE);
InetAddress bindToAddr = null;
if (bindTo != null) {
try {
bindToAddr = InetAddress.getByName(bindTo);
} catch (UnknownHostException uhe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Invalid SSU bind interface specified [" + bindTo + "]", uhe);
bindToAddr = null;
}
}
// bind host
String bindTo = _context.getProperty(PROP_BIND_INTERFACE);
InetAddress bindToAddr = null;
if (bindTo != null) {
try {
_endpoint = new UDPEndpoint(_context, this, port, bindToAddr);
} catch (SocketException se) {
if (_log.shouldLog(Log.CRIT))
_log.log(Log.CRIT, "Unable to listen on the UDP port (" + port + ")", se);
bindToAddr = InetAddress.getByName(bindTo);
} catch (UnknownHostException uhe) {
_log.log(Log.CRIT, "Invalid SSU bind interface specified [" + bindTo + "]", uhe);
setReachabilityStatus(CommSystemFacade.STATUS_HOSED);
return;
}
}
// Requested bind port
// This may be -1 or may not be honored if busy,
// we will check below after starting up the endpoint.
int port;
int oldIPort = _context.getProperty(PROP_INTERNAL_PORT, -1);
int oldEPort = _context.getProperty(PROP_EXTERNAL_PORT, -1);
if (_externalListenPort <= 0) {
// no explicit external port, so lets try an internal one
if (oldIPort > 0)
port = oldIPort;
else
port = oldEPort;
} else {
port = _externalListenPort;
}
if (_log.shouldLog(Log.INFO))
_log.info("Binding to the port: " + port);
if (_endpoint == null) {
_endpoint = new UDPEndpoint(_context, this, port, bindToAddr);
} else {
// todo, set bind address too
_endpoint.setListenPort(port);
}
@@ -278,7 +281,24 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
if (_flooder == null)
_flooder = new UDPFlooder(_context, this);
// Startup the endpoint with the requested port, check the actual port, and
// take action if it failed or was different than requested or it needs to be saved
_endpoint.startup();
int newPort = _endpoint.getListenPort();
_externalListenPort = newPort;
if (newPort <= 0) {
_log.log(Log.CRIT, "Unable to open UDP port");
setReachabilityStatus(CommSystemFacade.STATUS_HOSED);
return;
}
if (newPort != port || newPort != oldIPort || newPort != oldEPort) {
// attempt to use it as our external port - this will be overridden by
// externalAddressReceived(...)
_context.router().setConfigSetting(PROP_INTERNAL_PORT, newPort+"");
_context.router().setConfigSetting(PROP_EXTERNAL_PORT, newPort+"");
_context.router().saveConfig();
}
_establisher.startup();
_handler.startup();
_fragments.startup();
@@ -321,11 +341,16 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
public int getLocalPort() { return _externalListenPort; }
public InetAddress getLocalAddress() { return _externalListenHost; }
public int getExternalPort() { return _externalListenPort; }
/**
* _externalListenPort should always be set (by startup()) before this is called,
* so the returned value should be > 0
*/
@Override
public int getRequestedPort() {
if (_externalListenPort > 0)
return _externalListenPort;
return _context.getProperty(PROP_INTERNAL_PORT, DEFAULT_INTERNAL_PORT);
return _context.getProperty(PROP_INTERNAL_PORT, -1);
}
/**
@@ -2003,6 +2028,8 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
buf.append(" <td align=\"center\"><b>").append(resentTotal);
buf.append("</b></td> <td align=\"center\"><b>").append(dupRecvTotal).append("</b></td>\n");
buf.append(" </tr></table></div>\n");
/*****
long bytesTransmitted = _context.bandwidthLimiter().getTotalAllocatedOutboundBytes();
// NPE here early
double averagePacketSize = _context.statManager().getRate("udp.sendPacketSize").getLifetimeAverageValue();
@@ -2012,6 +2039,8 @@ public class UDPTransport extends TransportImpl implements TimedWeightedPriority
double bwResent = (nondupSent <= 0 ? 0d : ((((double)resentTotal)*averagePacketSize) / nondupSent));
buf.append("<h3>Percentage of bytes retransmitted (lifetime): ").append(formatPct(bwResent));
buf.append("</h3><i>(Includes retransmission required by packet loss)</i>\n");
*****/
out.write(buf.toString());
buf.setLength(0);
out.write(KEY);