2005-02-22 jrandom
    * Adjusted (and fixed...) the timestamper change detection
    * Deal with a rare reordering bug at the beginning of a stream (so we don't drop it unnecessarily)
    * Cleaned up some dropped message handling in the router
    * Reduced job queue churn when dealing with a large number of tunnels by sharing an expiration job
    * Keep a separate list of the most recent CRIT messages (shown on the logs.jsp). This way they don't get buried among any other messages.
    * For clarity, display the tunnel variance config as "Randomization" on the web console.
    * If lease republishing fails (boo! hiss!) try it again
    * Actually fix the negative jobLag in the right place (this time)
    * Allow reseeding when there are less than 10 known peer references
    * Lots of logging updates.
@@ -112,7 +112,7 @@ public class ConfigTunnelsHelper {
            buf.append("</tr>\n");

            // tunnel depth variance
            buf.append("<tr><td>Variance</td>\n");
            buf.append("<tr><td>Randomization</td>\n");
            buf.append("<td><select name=\"").append(index).append(".varianceInbound\">\n");
            buf.append("<option value=\"0\" ");
            if (in.getLengthVariance() == 0) buf.append(" selected=\"true\" ");
@@ -41,6 +41,22 @@ public class LogsHelper {
        return buf.toString();
    }

    public String getCriticalLogs() {
        List msgs = _context.logManager().getBuffer().getMostRecentCriticalMessages();
        StringBuffer buf = new StringBuffer(16*1024);
        buf.append("<ul>");
        buf.append("<code>\n");
        for (int i = msgs.size(); i > 0; i--) {
            String msg = (String)msgs.get(i - 1);
            buf.append("<li>");
            buf.append(msg);
            buf.append("</li>\n");
        }
        buf.append("</code></ul>\n");

        return buf.toString();
    }

    public String getServiceLogs() {
        String str = FileUtil.readTextFile("wrapper.log", 500, false);
        if (str == null)
@@ -3,8 +3,12 @@ package net.i2p.router.web;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.text.DateFormat;
import java.text.DecimalFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Iterator;
import java.util.Locale;
import java.util.Set;

import net.i2p.data.DataHelper;
@@ -70,7 +74,32 @@ public class SummaryHelper {
        else
            return DataHelper.formatDuration(router.getUptime());
    }

    private static final DateFormat _fmt = new java.text.SimpleDateFormat("HH:mm:ss", Locale.UK);
    public String getTime() {
        if (_context == null) return "";

        String now = null;
        synchronized (_fmt) {
            now = _fmt.format(new Date(_context.clock().now()));
        }

        long ms = _context.clock().getOffset();

        if (ms < 60 * 1000) {
            return now + " (" + (ms / 1000) + "s)";
        } else if (ms < 60 * 1000) {
            return now + " (" + (ms / (60 * 1000)) + "m)";
        } else if (ms < 24 * 60 * 60 * 1000) {
            return now + " (" + (ms / (60 * 60 * 1000)) + "h)";
        } else {
            return now + " (" + (ms / (24 * 60 * 60 * 1000)) + "d)";
        }
    }

    public boolean allowReseed() {
        return (_context.netDb().getKnownRouters() < 10);
    }

    /**
     * Retrieve amount of used memory.
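One thing to note in the getTime() hunk above: the second branch repeats the ms < 60 * 1000 test, so the minutes case is unreachable and a negative clock offset always lands in the seconds branch. A minimal corrected sketch (a hypothetical helper, not part of this commit) would compare against the absolute offset and use 60 * 60 * 1000 for the hour boundary:

    // Hypothetical variant, not part of the commit: format a clock offset in
    // the largest sensible unit, tolerating negative offsets as well.
    private static String formatOffset(long ms) {
        long abs = (ms < 0 ? -ms : ms);
        if (abs < 60 * 1000)
            return (ms / 1000) + "s";
        else if (abs < 60 * 60 * 1000)
            return (ms / (60 * 1000)) + "m";
        else if (abs < 24 * 60 * 60 * 1000)
            return (ms / (60 * 60 * 1000)) + "h";
        else
            return (ms / (24 * 60 * 60 * 1000)) + "d";
    }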
@@ -19,6 +19,9 @@
 <h4>Connection logs:</h4><a name="connectionlogs"> </a>
 <jsp:getProperty name="logsHelper" property="connectionLogs" />
 <hr />
 <h4>Critical logs:</h4><a name="criticallogs"> </a>
 <jsp:getProperty name="logsHelper" property="criticalLogs" />
 <hr />
 <h4>Service logs:</h4><a name="servicelogs"> </a>
 <jsp:getProperty name="logsHelper" property="serviceLogs" />
 </div>
@@ -10,6 +10,7 @@
 <b>Ident:</b> <jsp:getProperty name="helper" property="ident" /><br />
 <b>Version:</b> <jsp:getProperty name="helper" property="version" /><br />
 <b>Uptime:</b> <jsp:getProperty name="helper" property="uptime" /><br />
 <b>Now:</b> <jsp:getProperty name="helper" property="time" /><br />
 <b>Memory:</b> <jsp:getProperty name="helper" property="memory" /><br />
 <hr />

@@ -23,7 +24,7 @@
 if (helper.getActivePeers() <= 0) {
     %><b><a href="config.jsp">check your NAT/firewall</a></b><br /><%
 }
 if (helper.getActiveProfiles() <= 10) { // 10 is the min fallback
 if (helper.allowReseed()) {
     if ("true".equals(System.getProperty("net.i2p.router.web.ReseedHandler.reseedInProgress", "false"))) {
         out.print(" <i>reseeding</i>");
     } else {
@@ -259,11 +259,15 @@ public class ConnectionPacketHandler {
                con.setRemotePeer(packet.getOptionalFrom());
                return true;
            } else {
                // neither RST nor SYN and we dont have the stream id yet?  nuh uh
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Packet without RST or SYN where we dont know stream ID: "
                              + packet);
                return false;
                // neither RST nor SYN and we dont have the stream id yet?
                if (packet.getSequenceNum() <= 2) {
                    return true;
                } else {
                    if (_log.shouldLog(Log.WARN))
                        _log.warn("Packet without RST or SYN where we dont know stream ID: "
                                  + packet);
                    return false;
                }
            }
        } else {
            if (!DataHelper.eq(con.getSendStreamId(), packet.getReceiveStreamId())) {
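This is the "rare reordering bug at the beginning of a stream" fix from the changelog: a data packet that arrives before the SYN (so the connection does not yet know its stream ID) is no longer rejected outright; if its sequence number is at most 2 it is treated as early data from the just-opened stream and accepted instead of dropped. A condensed sketch of the rule, with hypothetical names:

    // Hypothetical condensation of the branch above: tolerate a little
    // reordering at stream start instead of killing the whole stream.
    private boolean acceptBeforeStreamIdKnown(boolean isRstOrSyn, long sequenceNum) {
        if (isRstOrSyn)
            return true;           // handled by the earlier branches
        return sequenceNum <= 2;   // early data that arrived ahead of the SYN
    }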
@@ -92,7 +92,9 @@ public class MessageHandler implements I2PSessionListener {
     */
    public void errorOccurred(I2PSession session, String message, Throwable error) {
        if (_log.shouldLog(Log.ERROR))
            _log.error("error occurred: " + message, error);
            _log.error("error occurred: " + message + "- " + error.getMessage());
        if (_log.shouldLog(Log.WARN))
            _log.warn("cause", error);
        //_manager.disconnectAllHard();
    }
@@ -137,29 +137,30 @@ public class PacketHandler {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Received forged reset for " + con, ie);
            }
        } else if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) {
        } else {
            if ( (con.getSendStreamId() == null) ||
                 (DataHelper.eq(con.getSendStreamId(), packet.getReceiveStreamId())) ) {
                byte oldId[] = con.getSendStreamId();
                // con fully established, w00t
                con.setSendStreamId(packet.getReceiveStreamId());
                if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) // con fully established, w00t
                    con.setSendStreamId(packet.getReceiveStreamId());

                try {
                    con.getPacketHandler().receivePacket(packet, con);
                } catch (I2PException ie) {
                    if (_log.shouldLog(Log.WARN))
                        _log.warn("Received forged syn for " + con, ie);
                        _log.warn("Received forged packet for " + con + ": " + packet, ie);
                    con.setSendStreamId(oldId);
                }
            } else {
            } else if (packet.isFlagSet(Packet.FLAG_SYNCHRONIZE)) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Receive a syn packet with the wrong IDs, sending reset: " + packet);
                sendReset(packet);
            }
        } else {
            if (!con.getResetSent()) {
                // someone is sending us a packet on the wrong stream
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Received a packet on the wrong stream: " + packet + " connection: " + con);
            } else {
            if (!con.getResetSent()) {
                // someone is sending us a packet on the wrong stream
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Received a packet on the wrong stream: " + packet + " connection: " + con);
            }
            }
        }
    }
@@ -36,9 +36,9 @@ class SchedulerHardDisconnected extends SchedulerImpl {
        long timeSinceClose = _context.clock().now() - con.getCloseSentOn();
        if (con.getResetSent())
            timeSinceClose = _context.clock().now() - con.getResetSentOn();
        boolean ok = (con.getHardDisconnected() || con.getResetSent() || con.getResetReceived()) &&
        boolean ok = (con.getHardDisconnected() || con.getResetSent()) &&
                     (timeSinceClose < Connection.DISCONNECT_TIMEOUT);
        return ok;
        return ok || con.getResetReceived();
    }

    public void eventOccurred(Connection con) {
@@ -504,7 +504,11 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
     * Pass off the error to the listener
     */
    void propogateError(String msg, Throwable error) {
        if (_log.shouldLog(Log.ERROR)) _log.error(getPrefix() + "Error occurred: " + msg, error);
        if (_log.shouldLog(Log.ERROR))
            _log.error(getPrefix() + "Error occurred: " + msg + " - " + error.getMessage());
        if (_log.shouldLog(Log.WARN))
            _log.warn(getPrefix() + " cause", error);

        if (_sessionListener != null) _sessionListener.errorOccurred(this, msg, error);
    }
@@ -57,8 +57,8 @@ class MessagePayloadMessageHandler extends HandlerImpl {
            Payload payload = msg.getPayload();
            byte[] data = _context.elGamalAESEngine().decrypt(payload.getEncryptedData(), session.getDecryptionKey());
            if (data == null) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Error decrypting the payload");
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Error decrypting the payload");
                throw new DataFormatException("Unable to decrypt the payload");
            }
            payload.setUnencryptedData(data);
@@ -26,10 +26,10 @@ public class Timestamper implements Runnable {
    private boolean _initialized;

    private static final int DEFAULT_QUERY_FREQUENCY = 5*60*1000;
    private static final String DEFAULT_SERVER_LIST = "pool.ntp.org, pool.ntp.org";
    private static final String DEFAULT_SERVER_LIST = "pool.ntp.org, pool.ntp.org, pool.ntp.org";
    private static final boolean DEFAULT_DISABLED = true;
    /** how many times do we have to query if we are changing the clock? */
    private static final int DEFAULT_CONCURRING_SERVERS = 2;
    private static final int DEFAULT_CONCURRING_SERVERS = 3;

    public static final String PROP_QUERY_FREQUENCY = "time.queryFrequencyMs";
    public static final String PROP_SERVER_LIST = "time.sntpServerList";
@@ -109,7 +109,7 @@ public class Timestamper implements Runnable {

        if (_log.shouldLog(Log.INFO))
            _log.info("Starting up timestamper");
        boolean alreadyBitched = false;
        boolean lastFailed = false;
        try {
            while (true) {
                updateConfig();
@@ -123,14 +123,16 @@ public class Timestamper implements Runnable {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("Querying servers " + _servers);
                    try {
                        queryTime(serverList);
                        lastFailed = !queryTime(serverList);
                    } catch (IllegalArgumentException iae) {
                        if (!alreadyBitched)
                        if (!lastFailed)
                            _log.log(Log.CRIT, "Unable to reach any of the NTP servers - network disconnected?");
                        alreadyBitched = true;
                        lastFailed = true;
                    }
                }
                long sleepTime = _context.random().nextInt(_queryFrequency) + _queryFrequency;
                if (lastFailed)
                    sleepTime = 30*1000;
                try { Thread.sleep(sleepTime); } catch (InterruptedException ie) {}
            }
        } catch (Throwable t) {
@@ -138,39 +140,64 @@ public class Timestamper implements Runnable {
        }
    }

    private void queryTime(String serverList[]) throws IllegalArgumentException {
    /**
     * True if the time was queried successfully, false if it couldn't be
     */
    private boolean queryTime(String serverList[]) throws IllegalArgumentException {
        long found[] = new long[serverList.length];
        long localTime = -1;
        long now = -1;
        long expectedDelta = 0;
        for (int i = 0; i < _concurringServers; i++) {
            try { Thread.sleep(10*1000); } catch (InterruptedException ie) {}
            localTime = _context.clock().now();
            now = NtpClient.currentTime(serverList);

            long delta = now - localTime;
            found[i] = delta;
            if (i == 0) {
                if (Math.abs(delta) < MAX_VARIANCE) {
                    if (_log.shouldLog(Log.INFO))
                        _log.info("a single SNTP query was within the tolerance (" + delta + "ms)");
                    return;
                    return true;
                } else {
                    // outside the tolerance, lets iterate across the concurring queries
                    expectedDelta = delta;
                }
            } else {
                if (Math.abs(delta - expectedDelta) > MAX_VARIANCE) {
                    if (_log.shouldLog(Log.ERROR))
                        _log.error("SNTP client variance exceeded at query " + i + ". expected = " + expectedDelta + ", found = " + delta);
                    return;
                    if (_log.shouldLog(Log.ERROR)) {
                        StringBuffer err = new StringBuffer(96);
                        err.append("SNTP client variance exceeded at query ").append(i);
                        err.append(". expected = ");
                        err.append(expectedDelta);
                        err.append(", found = ");
                        err.append(delta);
                        err.append(" all deltas: ");
                        for (int j = 0; j < found.length; j++)
                            err.append(found[j]).append(' ');
                        _log.error(err.toString());
                    }
                    return false;
                }
            }
        }
        stampTime(now);
        if (_log.shouldLog(Log.DEBUG)) {
            StringBuffer buf = new StringBuffer(64);
            buf.append("Deltas: ");
            for (int i = 0; i < found.length; i++)
                buf.append(found[i]).append(' ');
            _log.debug(buf.toString());
        }
        return true;
    }

    /**
     * Send an HTTP request to a given URL specifying the current time
     */
    private void stampTime(long now) {
        long before = _context.clock().now();
        synchronized (_listeners) {
            for (int i = 0; i < _listeners.size(); i++) {
                UpdateListener lsnr = (UpdateListener)_listeners.get(i);
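Taken together with the run() loop above, the reworked change detection is: accept immediately if the first SNTP delta is already within MAX_VARIANCE, otherwise require every additional concurring query to agree with the first delta within MAX_VARIANCE before stamping the clock, and report failure so the loop retries after 30 seconds instead of waiting a full query period. A compressed sketch of that decision, with hypothetical names and the logging stripped out:

    // Hypothetical restatement of the policy above: given the deltas measured
    // against each concurring server, decide whether the local clock may be
    // shifted by the first delta.
    static boolean offsetConfirmed(long[] deltas, long maxVariance) {
        long expected = deltas[0];
        if (Math.abs(expected) < maxVariance)
            return false;                      // clock already close enough, leave it alone
        for (int i = 1; i < deltas.length; i++) {
            if (Math.abs(deltas[i] - expected) > maxVariance)
                return false;                  // servers disagree: don't touch the clock
        }
        return true;                           // all queries concur: apply the shift
    }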
@@ -178,7 +205,7 @@ public class Timestamper implements Runnable {
            }
        }
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Stamped the time as " + now);
            _log.debug("Stamped the time as " + now + " (delta=" + (now-before) + ")");
    }

    /**
@@ -12,10 +12,12 @@ import net.i2p.I2PAppContext;
public class LogConsoleBuffer {
    private I2PAppContext _context;
    private List _buffer;
    private List _critBuffer;

    public LogConsoleBuffer(I2PAppContext context) {
        _context = context;
        _buffer = new ArrayList();
        _critBuffer = new ArrayList();
    }

    void add(String msg) {

@@ -26,6 +28,14 @@ public class LogConsoleBuffer {
            _buffer.add(msg);
        }
    }
    void addCritical(String msg) {
        int lim = _context.logManager().getConsoleBufferSize();
        synchronized (_critBuffer) {
            while (_critBuffer.size() >= lim)
                _critBuffer.remove(0);
            _critBuffer.add(msg);
        }
    }

    /**
     * Retrieve the currently buffered messages, earlier values were generated...

@@ -38,4 +48,15 @@ public class LogConsoleBuffer {
            return new ArrayList(_buffer);
        }
    }
    /**
     * Retrieve the currently buffered critical messages, earlier values were generated...
     * earlier. All values are strings with no formatting (as they are written
     * in the logs)
     *
     */
    public List getMostRecentCriticalMessages() {
        synchronized (_critBuffer) {
            return new ArrayList(_critBuffer);
        }
    }
}
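The new _critBuffer mirrors the existing console buffer: both are capped at the console buffer size, and addCritical() evicts the oldest entry before appending, so logs.jsp always shows the most recent CRIT messages no matter how much other logging happens in between. A standalone sketch of that bounded-buffer behaviour (a hypothetical class with a fixed limit instead of the LogManager setting):

    // Hypothetical illustration of the eviction behaviour added above.
    import java.util.ArrayList;
    import java.util.List;

    public class BoundedCritBuffer {
        private final List _critBuffer = new ArrayList();
        private final int _limit;

        public BoundedCritBuffer(int limit) { _limit = limit; }

        public void addCritical(String msg) {
            synchronized (_critBuffer) {
                while (_critBuffer.size() >= _limit)
                    _critBuffer.remove(0);            // drop the oldest entry
                _critBuffer.add(msg);
            }
        }

        public List getMostRecentCriticalMessages() {
            synchronized (_critBuffer) {
                return new ArrayList(_critBuffer);    // snapshot for the console
            }
        }
    }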
@@ -101,6 +101,8 @@ class LogWriter implements Runnable {

        // we always add to the console buffer, but only sometimes write to stdout
        _manager.getBuffer().add(val);
        if (rec.getPriority() >= Log.CRIT)
            _manager.getBuffer().addCritical(val);
        if (_manager.getDisplayOnScreenLevel() <= rec.getPriority()) {
            if (_manager.displayOnScreen()) {
                System.out.print(val);
history.txt
@@ -1,4 +1,20 @@
$Id: history.txt,v 1.149 2005/02/20 04:12:48 jrandom Exp $
$Id: history.txt,v 1.150 2005/02/21 14:08:01 jrandom Exp $

2005-02-22 jrandom
    * Adjusted (and fixed...) the timestamper change detection
    * Deal with a rare reordering bug at the beginning of a stream (so we
      don't drop it unnecessarily)
    * Cleaned up some dropped message handling in the router
    * Reduced job queue churn when dealing with a large number of tunnels by
      sharing an expiration job
    * Keep a separate list of the most recent CRIT messages (shown on the
      logs.jsp). This way they don't get buried among any other messages.
    * For clarity, display the tunnel variance config as "Randomization" on
      the web console.
    * If lease republishing fails (boo! hiss!) try it again
    * Actually fix the negative jobLag in the right place (this time)
    * Allow reseeding when there are less than 10 known peer references
    * Lots of logging updates.

2005-02-20 jrandom
    * Allow the streaming lib resend frequency to drop down to 20s as the
@@ -20,6 +20,7 @@ import net.i2p.data.RouterIdentity;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.DatabaseSearchReplyMessage;
import net.i2p.data.i2np.DatabaseLookupMessage;
import net.i2p.data.i2np.TunnelCreateStatusMessage;
import net.i2p.data.i2np.TunnelDataMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;

@@ -35,7 +36,7 @@ import net.i2p.util.Log;
public class InNetMessagePool implements Service {
    private Log _log;
    private RouterContext _context;
    private Map _handlerJobBuilders;
    private HandlerJobBuilder _handlerJobBuilders[];
    private List _pendingDataMessages;
    private List _pendingDataMessagesFrom;
    private List _pendingGatewayMessages;

@@ -57,7 +58,7 @@ public class InNetMessagePool implements Service {

    public InNetMessagePool(RouterContext context) {
        _context = context;
        _handlerJobBuilders = new HashMap();
        _handlerJobBuilders = new HandlerJobBuilder[20];
        _pendingDataMessages = new ArrayList(16);
        _pendingDataMessagesFrom = new ArrayList(16);
        _pendingGatewayMessages = new ArrayList(16);

@@ -75,11 +76,15 @@ public class InNetMessagePool implements Service {
    }

    public HandlerJobBuilder registerHandlerJobBuilder(int i2npMessageType, HandlerJobBuilder builder) {
        return (HandlerJobBuilder)_handlerJobBuilders.put(new Integer(i2npMessageType), builder);
        HandlerJobBuilder old = _handlerJobBuilders[i2npMessageType];
        _handlerJobBuilders[i2npMessageType] = builder;
        return old;
    }

    public HandlerJobBuilder unregisterHandlerJobBuilder(int i2npMessageType) {
        return (HandlerJobBuilder)_handlerJobBuilders.remove(new Integer(i2npMessageType));
        HandlerJobBuilder old = _handlerJobBuilders[i2npMessageType];
        _handlerJobBuilders[i2npMessageType] = null;
        return old;
    }

    /**

@@ -132,7 +137,7 @@ public class InNetMessagePool implements Service {
            shortCircuitTunnelData(messageBody, fromRouterHash);
            allowMatches = false;
        } else {
            HandlerJobBuilder builder = (HandlerJobBuilder)_handlerJobBuilders.get(new Integer(type));
            HandlerJobBuilder builder = _handlerJobBuilders[type];

            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Add message to the inNetMessage pool - builder: " + builder

@@ -190,12 +195,14 @@ public class InNetMessagePool implements Service {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Dropping slow db lookup response: " + messageBody);
                _context.statManager().addRateData("inNetPool.droppedDbLookupResponseMessage", 1, 0);
            } else if (type == DatabaseLookupMessage.MESSAGE_TYPE) {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Dropping netDb lookup due to throttling");
            } else {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Message " + messageBody + " expiring on "
                               + (messageBody != null ? (messageBody.getMessageExpiration()+"") : " [unknown]")
                               + " was not handled by a HandlerJobBuilder - DROPPING: "
                               + messageBody, new Exception("DROPPED MESSAGE"));
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Message expiring on "
                              + (messageBody != null ? (messageBody.getMessageExpiration()+"") : " [unknown]")
                              + " was not handled by a HandlerJobBuilder - DROPPING: " + messageBody);
                _context.statManager().addRateData("inNetPool.dropped", 1, 0);
            }
        } else {
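The InNetMessagePool hunks above replace a HashMap keyed on Integer message types with a plain array indexed by the I2NP type code, which avoids autoboxing and hashing on the per-message hot path. A minimal sketch of that dispatch-table pattern (generic names, not the router's actual classes):

    // Hypothetical illustration of map-to-array handler dispatch.
    interface Handler { void handle(Object message); }

    class HandlerTable {
        // type codes are small non-negative integers, so an array acts as the map
        private final Handler[] _handlers = new Handler[20];

        Handler register(int type, Handler h) {
            Handler old = _handlers[type];
            _handlers[type] = h;
            return old;
        }

        Handler lookup(int type) {
            if (type < 0 || type >= _handlers.length)
                return null;                 // unknown type: caller drops the message
            return _handlers[type];
        }
    }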
@@ -92,9 +92,12 @@ class JobQueueRunner implements Runnable {
                _state = 13;
                long diff = _context.clock().now() - beforeUpdate;

                long lag = doStart - origStartAfter;
                if (lag < 0) lag = 0;

                _context.statManager().addRateData("jobQueue.jobRunnerInactive", betweenJobs, betweenJobs);
                _context.statManager().addRateData("jobQueue.jobRun", duration, duration);
                _context.statManager().addRateData("jobQueue.jobLag", doStart - origStartAfter, 0);
                _context.statManager().addRateData("jobQueue.jobLag", lag, 0);
                _context.statManager().addRateData("jobQueue.jobWait", enqueuedTime, enqueuedTime);

                if (duration > 1000) {
@@ -57,6 +57,8 @@ public abstract class NetworkDatabaseFacade implements Service {
    public abstract void publish(LeaseSet localLeaseSet);
    public abstract void unpublish(LeaseSet localLeaseSet);
    public abstract void fail(Hash dbEntry);

    public int getKnownRouters() { return 0; }
}
@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
 *
 */
public class RouterVersion {
    public final static String ID = "$Revision: 1.144 $ $Date: 2005/02/20 04:12:46 $";
    public final static String ID = "$Revision: 1.145 $ $Date: 2005/02/21 13:02:16 $";
    public final static String VERSION = "0.5";
    public final static long BUILD = 3;
    public final static long BUILD = 4;
    public static void main(String args[]) {
        System.out.println("I2P Router version: " + VERSION);
        System.out.println("Router ID: " + RouterVersion.ID);
@@ -76,6 +76,9 @@ public class GarlicMessageReceiver {
        } else {
            if (_log.shouldLog(Log.ERROR))
                _log.error("CloveMessageParser failed to decrypt the message [" + message.getUniqueId()
                           + "]");
            if (_log.shouldLog(Log.WARN))
                _log.warn("CloveMessageParser failed to decrypt the message [" + message.getUniqueId()
                          + "]", new Exception("Decrypt garlic failed"));
            _context.statManager().addRateData("crypto.garlic.decryptFail", 1, 0);
            _context.messageHistory().messageProcessingError(message.getUniqueId(),

@@ -105,6 +108,9 @@ public class GarlicMessageReceiver {
            String howLongAgo = DataHelper.formatDuration(_context.clock().now()-clove.getExpiration().getTime());
            if (_log.shouldLog(Log.ERROR))
                _log.error("Clove is NOT valid: id=" + clove.getCloveId()
                           + " expiration " + howLongAgo + " ago");
            if (_log.shouldLog(Log.WARN))
                _log.warn("Clove is NOT valid: id=" + clove.getCloveId()
                          + " expiration " + howLongAgo + " ago", new Exception("Invalid within..."));
            _context.messageHistory().messageProcessingError(clove.getCloveId(),
                                                             clove.getData().getClass().getName(),
@@ -60,6 +60,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
    private boolean _initialized;
    /** Clock independent time of when we started up */
    private long _started;
    private int _knownRouters;
    private StartExplorersJob _exploreJob;
    private HarvesterJob _harvestJob;
    /** when was the last time an exploration found something new? */

@@ -130,6 +131,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
        _peerSelector = new PeerSelector(_context);
        _publishingLeaseSets = new HashSet(8);
        _lastExploreNew = 0;
        _knownRouters = 0;
        _activeRequests = new HashMap(8);
        _enforceNetId = DEFAULT_ENFORCE_NETID;
    }

@@ -359,6 +361,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
        return rv;
    }

    public int getKnownRouters() { return _knownRouters; }

    public void lookupLeaseSet(Hash key, Job onFindJob, Job onFailedLookupJob, long timeoutMs) {
        if (!_initialized) return;
        LeaseSet ls = lookupLeaseSetLocally(key);

@@ -639,6 +643,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
                      + routerInfo.getOptions().size() + " options on "
                      + new Date(routerInfo.getPublished()));

        _knownRouters++;
        _ds.put(key, routerInfo);
        synchronized (_lastSent) {
            if (!_lastSent.containsKey(key))

@@ -699,6 +704,8 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
        synchronized (_passiveSendKeys) {
            _passiveSendKeys.remove(dbEntry);
        }
        if (isRouterInfo)
            _knownRouters--;
    }

    public void unpublish(LeaseSet localLeaseSet) {
@@ -76,6 +76,7 @@ public class RepublishLeaseSetJob extends JobImpl {
            public void runJob() {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("FAILED publishing of the leaseSet for " + _dest.toBase64());
                getContext().jobQueue().addJob(new RepublishLeaseSetJob(getContext(), _facade, _dest));
            }
        }
    }
@@ -17,10 +17,13 @@ class OutboundReceiver implements TunnelGateway.Receiver {
    private RouterContext _context;
    private Log _log;
    private TunnelCreatorConfig _config;
    private RouterInfo _nextHopCache;

    public OutboundReceiver(RouterContext ctx, TunnelCreatorConfig cfg) {
        _context = ctx;
        _log = ctx.logManager().getLog(OutboundReceiver.class);
        _config = cfg;
        _nextHopCache = _context.netDb().lookupRouterInfoLocally(_config.getPeer(1));
    }

    public void receiveEncrypted(byte encrypted[]) {

@@ -30,8 +33,11 @@ class OutboundReceiver implements TunnelGateway.Receiver {

        if (_log.shouldLog(Log.DEBUG))
            _log.debug("received encrypted, sending out " + _config + ": " + msg);
        RouterInfo ri = _context.netDb().lookupRouterInfoLocally(_config.getPeer(1));
        RouterInfo ri = _nextHopCache;
        if (ri == null)
            ri = _context.netDb().lookupRouterInfoLocally(_config.getPeer(1));
        if (ri != null) {
            _nextHopCache = ri;
            send(msg, ri);
        } else {
            if (_log.shouldLog(Log.DEBUG))

@@ -65,8 +71,10 @@ class OutboundReceiver implements TunnelGateway.Receiver {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("lookup of " + _config.getPeer(1).toBase64().substring(0,4)
                           + " successful? " + (ri != null));
            if (ri != null)
            if (ri != null) {
                _nextHopCache = ri;
                send(_msg, ri);
            }
        }
    }
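The OutboundReceiver change caches the next hop's RouterInfo so that every outbound fragment does not repeat the netDb lookup: the cache is primed in the constructor, refreshed whenever a lookup (including the deferred one) succeeds, and bypassed with a fresh local lookup while it is still empty. A generic sketch of that lazy-cache pattern (hypothetical types, not the router's API):

    // Hypothetical sketch of the caching pattern above.
    class CachedLookup {
        interface Source { Object find(); }

        private final Source _source;
        private Object _cached;

        CachedLookup(Source source) {
            _source = source;
            _cached = source.find();        // may be null until the netDb knows the peer
        }

        synchronized Object get() {
            if (_cached == null)
                _cached = _source.find();   // retry the expensive lookup on demand
            return _cached;
        }
    }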
@@ -38,6 +38,7 @@ public class TunnelDispatcher implements Service {
    /** what is the date/time on which the last non-locally-created tunnel expires? */
    private long _lastParticipatingExpiration;
    private BloomFilterIVValidator _validator;
    private LeaveTunnel _leaveJob;

    /** Creates a new instance of TunnelDispatcher */
    public TunnelDispatcher(RouterContext ctx) {

@@ -50,6 +51,7 @@ public class TunnelDispatcher implements Service {
        _participatingConfig = new HashMap();
        _lastParticipatingExpiration = 0;
        _validator = null;
        _leaveJob = new LeaveTunnel(ctx);
        ctx.statManager().createRateStat("tunnel.participatingTunnels",
                                         "How many tunnels are we participating in?", "Tunnels",
                                         new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });

@@ -176,7 +178,7 @@ public class TunnelDispatcher implements Service {
        _context.statManager().addRateData("tunnel.joinParticipant", 1, 0);
        if (cfg.getExpiration() > _lastParticipatingExpiration)
            _lastParticipatingExpiration = cfg.getExpiration();
        _context.jobQueue().addJob(new LeaveTunnel(_context, cfg));
        _leaveJob.add(cfg);
    }
    /**
     * We are the outbound endpoint in this tunnel, and did not create it

@@ -200,7 +202,7 @@ public class TunnelDispatcher implements Service {

        if (cfg.getExpiration() > _lastParticipatingExpiration)
            _lastParticipatingExpiration = cfg.getExpiration();
        _context.jobQueue().addJob(new LeaveTunnel(_context, cfg));
        _leaveJob.add(cfg);
    }

    /**

@@ -228,7 +230,7 @@ public class TunnelDispatcher implements Service {

        if (cfg.getExpiration() > _lastParticipatingExpiration)
            _lastParticipatingExpiration = cfg.getExpiration();
        _context.jobQueue().addJob(new LeaveTunnel(_context, cfg));
        _leaveJob.add(cfg);
    }

    public int getParticipatingCount() {

@@ -336,10 +338,11 @@ public class TunnelDispatcher implements Service {
            _context.statManager().addRateData("tunnel.dispatchEndpoint", 1, 0);
        } else {
            _context.messageHistory().droppedTunnelDataMessageUnknown(msg.getUniqueId(), msg.getTunnelId().getTunnelId());
            if (_log.shouldLog(Log.ERROR))
                _log.error("no matching participant/endpoint for id=" + msg.getTunnelId().getTunnelId()
                           + ": existing = " + _participants.keySet()
                           + " / " + _outboundEndpoints.keySet());
            int level = (_context.router().getUptime() > 10*60*1000 ? Log.ERROR : Log.WARN);
            if (_log.shouldLog(level))
                _log.log(level, "no matching participant/endpoint for id=" + msg.getTunnelId().getTunnelId()
                         + " expiring in " + DataHelper.formatDuration(msg.getMessageExpiration()-_context.clock().now())
                         + ": existing = " + _participants.size() + " / " + _outboundEndpoints.size());
        }
    }

@@ -374,8 +377,9 @@ public class TunnelDispatcher implements Service {
            _context.statManager().addRateData("tunnel.dispatchInbound", 1, 0);
        } else {
            _context.messageHistory().droppedTunnelGatewayMessageUnknown(msg.getUniqueId(), msg.getTunnelId().getTunnelId());
            if (_log.shouldLog(Log.ERROR))
                _log.error("no matching tunnel for id=" + msg.getTunnelId().getTunnelId()
            int level = (_context.router().getUptime() > 10*60*1000 ? Log.ERROR : Log.WARN);
            if (_log.shouldLog(level))
                _log.log(level, "no matching tunnel for id=" + msg.getTunnelId().getTunnelId()
                         + ": gateway message expiring in "
                         + DataHelper.formatDuration(msg.getMessageExpiration()-_context.clock().now())
                         + "/"

@@ -383,7 +387,7 @@ public class TunnelDispatcher implements Service {
                         + " messageId " + msg.getUniqueId()
                         + "/" + msg.getMessage().getUniqueId()
                         + " messageType: " + msg.getMessage().getClass().getName()
                         + " existing = " + _inboundGateways.keySet());
                         + " existing = " + _inboundGateways.size());
        }

        long dispatchTime = _context.clock().now() - before;

@@ -423,7 +427,7 @@ public class TunnelDispatcher implements Service {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("dispatch outbound through " + outboundTunnel.getTunnelId()
                       + ": " + msg);
        if (msg.getMessageExpiration() < before) {
        if (msg.getMessageExpiration() < before - Router.CLOCK_FUDGE_FACTOR) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("why are you sending a tunnel message that expired "
                           + (before-msg.getMessageExpiration()) + "ms ago? "

@@ -438,9 +442,10 @@ public class TunnelDispatcher implements Service {
        } else {
            _context.messageHistory().droppedTunnelGatewayMessageUnknown(msg.getUniqueId(), outboundTunnel.getTunnelId());

            if (_log.shouldLog(Log.ERROR))
                _log.error("no matching outbound tunnel for id=" + outboundTunnel
                           + ": existing = " + _outboundGateways.keySet());
            int level = (_context.router().getUptime() > 10*60*1000 ? Log.ERROR : Log.WARN);
            if (_log.shouldLog(level))
                _log.log(level, "no matching outbound tunnel for id=" + outboundTunnel
                         + ": existing = " + _outboundGateways.size());
        }

        long dispatchTime = _context.clock().now() - before;

@@ -473,16 +478,59 @@ public class TunnelDispatcher implements Service {
    public void renderStatusHTML(Writer out) throws IOException {}

    private class LeaveTunnel extends JobImpl {
        private HopConfig _config;
        private List _configs;
        private List _times;

        public LeaveTunnel(RouterContext ctx, HopConfig config) {
        public LeaveTunnel(RouterContext ctx) {
            super(ctx);
            _config = config;
            getTiming().setStartAfter(config.getExpiration() + 2*Router.CLOCK_FUDGE_FACTOR);
            _configs = new ArrayList(128);
            _times = new ArrayList(128);
        }

        public void add(HopConfig cfg) {
            Long dropTime = new Long(cfg.getExpiration() + 2*Router.CLOCK_FUDGE_FACTOR);
            synchronized (LeaveTunnel.this) {
                _configs.add(cfg);
                _times.add(dropTime);
            }

            long oldAfter = getTiming().getStartAfter();
            if (oldAfter < getContext().clock().now()) {
                getTiming().setStartAfter(dropTime.longValue());
                getContext().jobQueue().addJob(LeaveTunnel.this);
            } else if (oldAfter >= dropTime.longValue()) {
                getTiming().setStartAfter(dropTime.longValue());
            } else {
                // already scheduled for the future, and before this expiration
            }
        }

        public String getName() { return "Leave participant"; }
        public void runJob() {
            remove(_config);
            HopConfig cur = null;
            Long nextTime = null;
            long now = getContext().clock().now();
            synchronized (LeaveTunnel.this) {
                if (_configs.size() <= 0)
                    return;
                nextTime = (Long)_times.get(0);
                if (nextTime.longValue() <= now) {
                    cur = (HopConfig)_configs.remove(0);
                    _times.remove(0);
                    if (_times.size() > 0)
                        nextTime = (Long)_times.get(0);
                    else
                        nextTime = null;
                }
            }

            if (cur != null)
                remove(cur);

            if (nextTime != null) {
                getTiming().setStartAfter(nextTime.longValue());
                getContext().jobQueue().addJob(LeaveTunnel.this);
            }
        }
    }
}
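The LeaveTunnel rewrite is the "sharing an expiration job" item from the changelog: instead of queuing one job per participating tunnel, a single job keeps parallel lists of hop configs and drop times, reschedules itself for the earliest pending expiration, and removes entries as they come due. A compressed sketch of that shared-timer pattern (hypothetical names, a plain callback in place of the router's job queue, and all due entries dropped in one pass):

    // Hypothetical restatement of the shared expiration job above.
    import java.util.ArrayList;
    import java.util.List;

    class SharedExpirer {
        interface Clock { long now(); }
        interface Scheduler { void runAt(long when, Runnable task); }

        private final List _items = new ArrayList();   // entries, oldest first
        private final List _times = new ArrayList();   // matching drop times
        private final Clock _clock;
        private final Scheduler _scheduler;
        private long _scheduledFor = -1;               // when the single job next fires

        SharedExpirer(Clock clock, Scheduler scheduler) {
            _clock = clock;
            _scheduler = scheduler;
        }

        synchronized void add(Object item, long dropTime) {
            _items.add(item);
            _times.add(new Long(dropTime));
            // reschedule the one shared job only if it is idle or would fire too late
            if (_scheduledFor < _clock.now() || _scheduledFor > dropTime)
                schedule(dropTime);
        }

        synchronized void expire() {
            while (!_times.isEmpty() && ((Long)_times.get(0)).longValue() <= _clock.now()) {
                _times.remove(0);
                _items.remove(0);                      // drop the expired entry
            }
            if (!_times.isEmpty())
                schedule(((Long)_times.get(0)).longValue());
        }

        private void schedule(long when) {
            _scheduledFor = when;
            _scheduler.runAt(when, new Runnable() { public void run() { expire(); } });
        }
    }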