2006-04-05 jrandom
    * Cut down on the time that we allow a tunnel creation request to sit by
      without response, and reject tunnel creation requests that are lagged
      locally. Also switch to a bounded FIFO instead of a LIFO
    * Threading tweaks for the message handling (thanks bar!)
    * Don't add addresses to syndie with blank names (thanks Complication!)
    * Further ban clearance
@@ -28,6 +28,7 @@ import net.i2p.util.Log;
public class Archive {
    private I2PAppContext _context;
    private Log _log;
    private BlogManager _mgr;
    private File _rootDir;
    private File _cacheDir;
    private Map _blogInfo;
@@ -43,9 +44,10 @@ public class Archive {
        public boolean accept(File dir, String name) { return name.endsWith(".snd"); }
    };

    public Archive(I2PAppContext ctx, String rootDir, String cacheDir) {
    public Archive(I2PAppContext ctx, String rootDir, String cacheDir, BlogManager mgr) {
        _context = ctx;
        _log = ctx.logManager().getLog(Archive.class);
        _mgr = mgr;
        _rootDir = new File(rootDir);
        if (!_rootDir.exists())
            _rootDir.mkdirs();
@@ -72,6 +74,13 @@ public class Archive {
            try {
                fi = new FileInputStream(meta);
                bi.load(fi);
                if (_mgr.isBanned(bi.getKey().calculateHash())) {
                    fi.close();
                    fi = null;
                    _log.error("Deleting banned blog " + bi.getKey().calculateHash().toBase64());
                    delete(bi.getKey().calculateHash());
                    continue;
                }
                if (bi.verify(_context)) {
                    info.add(bi);
                } else {
@@ -120,6 +129,12 @@ public class Archive {
            _log.warn("Not storing invalid blog " + info);
            return false;
        }

        if (_mgr.isBanned(info.getKey().calculateHash())) {
            _log.error("Not storing banned blog " + info.getKey().calculateHash().toBase64(), new Exception("Stored by"));
            return false;
        }

        boolean isNew = true;
        synchronized (_blogInfo) {
            BlogInfo old = (BlogInfo)_blogInfo.get(info.getKey().calculateHash());
@@ -274,8 +289,9 @@ public class Archive {
            if (blogKey == null) {
                // no key, cache.
                File entryDir = getEntryDir(entries[i]);
                if (entryDir.exists())
                if (entryDir.exists()) {
                    entry = getCachedEntry(entryDir);
                }
                if ((entry == null) || !entryDir.exists()) {
                    if (!extractEntry(entries[i], entryDir, info)) {
                        _log.error("Entry " + entries[i].getPath() + " is not valid");

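The Archive now receives the BlogManager at construction so it can consult BlogManager.isBanned(Hash) both when loading blog metadata from disk (banned blogs get deleted) and when asked to store new metadata (the store is refused). The isBanned() implementation itself is not part of this diff; a minimal sketch of what such a ban list could look like, assuming it is simply a set of base64 blog-key hashes, is below (the class and method names other than isBanned are illustrative, not the actual BlogManager code):

    import java.util.HashSet;
    import java.util.Set;

    import net.i2p.data.Hash;

    /** Hypothetical sketch of a ban list keyed by the blog's signing-key hash. */
    class BlogBanList {
        private final Set _bannedBase64 = new HashSet(); // holds Hash.toBase64() strings

        /** e.g. populated from a config property listing banned blog hashes */
        void ban(Hash blogKeyHash) {
            synchronized (_bannedBase64) {
                _bannedBase64.add(blogKeyHash.toBase64());
            }
        }

        boolean isBanned(Hash blogKeyHash) {
            synchronized (_bannedBase64) {
                return _bannedBase64.contains(blogKeyHash.toBase64());
            }
        }
    }

Keying the set by the hash of the blog's signing key matches how the callers above identify a blog (info.getKey().calculateHash()).
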
@@ -74,7 +74,7 @@ public class BlogManager {
        _cacheDir.mkdirs();
        _userDir.mkdirs();
        _tempDir.mkdirs();
        _archive = new Archive(ctx, _archiveDir.getAbsolutePath(), _cacheDir.getAbsolutePath());
        _archive = new Archive(ctx, _archiveDir.getAbsolutePath(), _cacheDir.getAbsolutePath(), this);
        if (regenIndex)
            _archive.regenerateIndex();
    }

@@ -295,7 +295,7 @@ public abstract class BaseServlet extends HttpServlet {

        if (AddressesServlet.ACTION_ADD_TAG.equals(action)) {
            String name = req.getParameter(AddressesServlet.PARAM_NAME);
            if (!user.getPetNameDB().containsName(name)) {
            if ((name != null) && (name.trim().length() > 0) && (!user.getPetNameDB().containsName(name)) ) {
                PetName pn = new PetName(name, AddressesServlet.NET_SYNDIE, AddressesServlet.PROTO_TAG, name);
                user.getPetNameDB().add(pn);
                BlogManager.instance().saveUser(user);
@@ -307,7 +307,7 @@ public abstract class BaseServlet extends HttpServlet {
                   (AddressesServlet.ACTION_ADD_OTHER.equals(action)) ||
                   (AddressesServlet.ACTION_ADD_PEER.equals(action)) ) {
            PetName pn = buildNewAddress(req);
            if ( (pn != null) && (pn.getName() != null) && (pn.getLocation() != null) &&
            if ( (pn != null) && (pn.getName() != null) && (pn.getName().trim().length() > 0) && (pn.getLocation() != null) &&
                 (!user.getPetNameDB().containsName(pn.getName())) ) {
                user.getPetNameDB().add(pn);
                BlogManager.instance().saveUser(user);
@@ -744,6 +744,8 @@ public abstract class BaseServlet extends HttpServlet {
        for (Iterator iter = names.iterator(); iter.hasNext(); ) {
            String name = (String) iter.next();
            PetName pn = db.getByName(name);
            if (pn == null)
                continue;
            String proto = pn.getProtocol();
            String loc = pn.getLocation();
            if (proto != null && loc != null && "syndieblog".equals(proto) && pn.isMember(FilteredThreadIndex.GROUP_FAVORITE)) {

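The "blank names" part of this commit is the guard visible in the two add-address branches above: a petname is only added when the submitted name is non-null, non-blank after trimming, and not already present in the user's petname database. Factored out, the check amounts to the helper below (the helper itself is illustrative and not part of the patch; PetNameDB is assumed to be the type returned by user.getPetNameDB()):

    /** Illustrative helper equivalent to the inline guard added above. */
    private static boolean isUsableName(String name, PetNameDB db) {
        return (name != null) && (name.trim().length() > 0) && !db.containsName(name);
    }
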
history.txt
@@ -1,4 +1,12 @@
$Id: history.txt,v 1.443 2006/04/03 05:07:24 jrandom Exp $
$Id: history.txt,v 1.444 2006/04/04 07:20:39 jrandom Exp $

2006-04-05 jrandom
    * Cut down on the time that we allow a tunnel creation request to sit by
      without response, and reject tunnel creation requests that are lagged
      locally. Also switch to a bounded FIFO instead of a LIFO
    * Threading tweaks for the message handling (thanks bar!)
    * Don't add addresses to syndie with blank names (thanks Complication!)
    * Further ban clearance

2006-04-05 jrandom
    * Fix during the ssu handshake to avoid an unnecessary failure on

@@ -15,9 +15,9 @@ import net.i2p.CoreVersion;
 *
 */
public class RouterVersion {
    public final static String ID = "$Revision: 1.383 $ $Date: 2006/04/01 14:08:41 $";
    public final static String ID = "$Revision: 1.384 $ $Date: 2006/04/03 05:07:25 $";
    public final static String VERSION = "0.6.1.13";
    public final static long BUILD = 3;
    public final static long BUILD = 5;
    public static void main(String args[]) {
        System.out.println("I2P Router version: " + VERSION + "-" + BUILD);
        System.out.println("Router ID: " + RouterVersion.ID);

@@ -101,8 +101,7 @@ public class OutboundMessageRegistry {
            } else if (o instanceof List) {
                msgs = (List)o;
                if (msgs != null)
                    for (int j = 0; j < msgs.size(); j++)
                        rv.add(msgs.get(j));
                rv.addAll(msgs);
            }
        }
        if (removed) {
@@ -152,7 +151,7 @@ public class OutboundMessageRegistry {
        if (oldMsg != null) {
            List multi = null;
            if (oldMsg instanceof OutNetMessage) {
                multi = new ArrayList(4);
                multi = Collections.synchronizedList(new ArrayList(4));
                multi.add(oldMsg);
                multi.add(msg);
                _selectorToMessage.put(sel, multi);
@@ -226,7 +225,7 @@ public class OutboundMessageRegistry {
            if (o instanceof OutNetMessage)
                msg = (OutNetMessage)o;
            else if (o instanceof List)
                msgs = (List)o;
                msgs = new ArrayList((List)o);
        }
        if (msg != null) {
            synchronized (_activeMessages) {

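The "threading tweaks" in OutboundMessageRegistry follow one pattern: the per-selector list is wrapped in Collections.synchronizedList(), and code that wants to walk a list copies it into a fresh ArrayList instead of iterating the shared instance, so iteration never races with another thread's add or remove. A minimal, self-contained sketch of that copy-then-iterate pattern, with illustrative class and field names (not the real registry types):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    /** Illustrative sketch of the copy-then-iterate pattern used above. */
    class PendingMessages {
        // writers share one synchronized list; each call locks internally
        private final List _pending = Collections.synchronizedList(new ArrayList(4));

        void add(Object msg) {
            _pending.add(msg); // safe without external locking
        }

        /** Readers iterate over a snapshot, never over the live shared list. */
        List snapshot() {
            synchronized (_pending) {          // required when copying a synchronizedList
                return new ArrayList(_pending);
            }
        }
    }

Taking the copy inside a synchronized block on the wrapper is required by the Collections.synchronizedList contract; after that, the snapshot can be iterated without holding any lock.
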
@@ -44,6 +44,8 @@ class BuildHandler {
        _context.statManager().createRateStat("tunnel.rejectOverloaded", "How long we had to wait before processing the request (when it was rejected)", "Tunnels", new long[] { 60*1000, 10*60*1000 });
        _context.statManager().createRateStat("tunnel.acceptLoad", "Delay before processing the accepted request", "Tunnels", new long[] { 60*1000, 10*60*1000 });
        _context.statManager().createRateStat("tunnel.dropLoad", "How long we had to wait before finally giving up on an inbound request (period is queue count)?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
        _context.statManager().createRateStat("tunnel.dropLoadDelay", "How long we had to wait before finally giving up on an inbound request?", "Tunnels", new long[] { 60*1000, 10*60*1000 });
        _context.statManager().createRateStat("tunnel.dropLoadBacklog", "How many requests were pending when they were so lagged that we had to drop a new inbound request??", "Tunnels", new long[] { 60*1000, 10*60*1000 });
        _context.statManager().createRateStat("tunnel.handleRemaining", "How many pending inbound requests were left on the queue after one pass?", "Tunnels", new long[] { 60*1000, 10*60*1000 });

        _context.statManager().createRateStat("tunnel.receiveRejectionProbabalistic", "How often we are rejected probabalistically?", "Tunnels", new long[] { 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
@@ -66,6 +68,7 @@ class BuildHandler {
     * there are remaining requeusts we skipped over
     */
    boolean handleInboundRequests() {
        int dropExpired = 0;
        List handled = null;
        synchronized (_inboundBuildMessages) {
            int toHandle = _inboundBuildMessages.size();
@@ -73,8 +76,31 @@ class BuildHandler {
                if (toHandle > MAX_HANDLE_AT_ONCE)
                    toHandle = MAX_HANDLE_AT_ONCE;
                handled = new ArrayList(toHandle);
                for (int i = 0; i < toHandle; i++) // LIFO for lower response time (should we RED it for DoS?)
                    handled.add(_inboundBuildMessages.remove(_inboundBuildMessages.size()-1));
                if (false) {
                    for (int i = 0; i < toHandle; i++) // LIFO for lower response time (should we RED it for DoS?)
                        handled.add(_inboundBuildMessages.remove(_inboundBuildMessages.size()-1));
                } else {
                    // drop any expired messages
                    long dropBefore = System.currentTimeMillis() - BuildRequestor.REQUEST_TIMEOUT;
                    do {
                        BuildMessageState state = (BuildMessageState)_inboundBuildMessages.get(0);
                        if (state.recvTime <= dropBefore) {
                            _inboundBuildMessages.remove(0);
                            dropExpired++;
                            if (_log.shouldLog(Log.WARN))
                                _log.warn("Not even trying to handle/decrypt the request " + state.msg.getUniqueId()
                                          + ", since we received it a long time ago: " + (System.currentTimeMillis() - state.recvTime));
                            _context.statManager().addRateData("tunnel.dropLoadDelay", System.currentTimeMillis() - state.recvTime, 0);
                        } else {
                            break;
                        }
                    } while (_inboundBuildMessages.size() > 0);

                    // now pull off the oldest requests first (we're doing a tail-drop
                    // when adding)
                    for (int i = 0; i < toHandle; i++)
                        handled.add(_inboundBuildMessages.remove(0));
                }
            }
        }
        if (handled != null) {
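The rewritten block changes the queueing discipline for inbound tunnel-build requests: entries that have already outlived BuildRequestor.REQUEST_TIMEOUT are dropped from the head of the queue (feeding tunnel.dropLoadDelay), and the survivors are handled oldest-first (FIFO) rather than newest-first, with the old LIFO branch left behind under if (false). Stripped of the I2P plumbing, the policy is roughly the sketch below; the queue element type, timeout, and batch-size constants are stand-ins, not the real BuildHandler types:

    import java.util.ArrayList;
    import java.util.List;

    /** Illustrative sketch of the drop-expired-then-FIFO policy used above. */
    class RequestQueue {
        static final long REQUEST_TIMEOUT = 10 * 1000; // stand-in for BuildRequestor.REQUEST_TIMEOUT
        static final int MAX_HANDLE_AT_ONCE = 20;      // stand-in constant

        static class Request {
            final long recvTime = System.currentTimeMillis();
        }

        private final List _pending = new ArrayList();

        /** Drop expired entries from the head, then hand back the oldest survivors. */
        List nextBatch() {
            List batch = new ArrayList(MAX_HANDLE_AT_ONCE);
            synchronized (_pending) {
                long dropBefore = System.currentTimeMillis() - REQUEST_TIMEOUT;
                // requests arrive in order, so expired ones cluster at the head
                while (!_pending.isEmpty() && ((Request) _pending.get(0)).recvTime <= dropBefore)
                    _pending.remove(0);
                int toHandle = Math.min(_pending.size(), MAX_HANDLE_AT_ONCE);
                for (int i = 0; i < toHandle; i++)
                    batch.add(_pending.remove(0)); // FIFO: oldest first
            }
            return batch;
        }
    }

Handling oldest-first keeps per-request latency bounded and pairs with the tail-drop on the enqueue side shown further down.
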
@@ -246,11 +272,12 @@ class BuildHandler {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(state.msg.getUniqueId() + ": handling request after " + timeSinceReceived);

        if (timeSinceReceived > BuildRequestor.REQUEST_TIMEOUT*2) {
        if (timeSinceReceived > BuildRequestor.REQUEST_TIMEOUT) {
            // don't even bother, since we are so overloaded locally
            if (_log.shouldLog(Log.ERROR))
                _log.error("Not even trying to handle/decrypt the request " + state.msg.getUniqueId()
            if (_log.shouldLog(Log.WARN))
                _log.warn("Not even trying to handle/decrypt the request " + state.msg.getUniqueId()
                          + ", since we received it a long time ago: " + timeSinceReceived);
            _context.statManager().addRateData("tunnel.dropLoadDelay", timeSinceReceived, 0);
            return;
        }
        // ok, this is not our own tunnel, so we need to do some heavy lifting
@@ -358,7 +385,7 @@ class BuildHandler {
            // response = TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;

        long recvDelay = System.currentTimeMillis()-state.recvTime;
        if ( (response == 0) && (recvDelay > BuildRequestor.REQUEST_TIMEOUT) ) {
        if ( (response == 0) && (recvDelay > BuildRequestor.REQUEST_TIMEOUT/2) ) {
            _context.statManager().addRateData("tunnel.rejectOverloaded", recvDelay, recvDelay);
            response = TunnelHistory.TUNNEL_REJECT_TRANSIENT_OVERLOAD;
        } else if (response == 0) {
@@ -523,17 +550,24 @@ class BuildHandler {
        } else {
            synchronized (_inboundBuildMessages) {
                boolean removed = false;
                int dropped = 0;
                while (_inboundBuildMessages.size() > 0) {
                    BuildMessageState cur = (BuildMessageState)_inboundBuildMessages.get(0);
                    BuildMessageState cur = (BuildMessageState)_inboundBuildMessages.get(_inboundBuildMessages.size()-1);
                    long age = System.currentTimeMillis() - cur.recvTime;
                    if (age >= BuildRequestor.REQUEST_TIMEOUT) {
                        _inboundBuildMessages.remove(0);
                        _inboundBuildMessages.remove(_inboundBuildMessages.size()-1);
                        dropped++;
                        _context.statManager().addRateData("tunnel.dropLoad", age, _inboundBuildMessages.size());
                    } else {
                        break;
                    }
                }
                _inboundBuildMessages.add(new BuildMessageState(receivedMessage, from, fromHash));
                if (dropped > 0) {
                    // if the queue is backlogged, stop adding new messages
                    _context.statManager().addRateData("tunnel.dropLoadBacklog", _inboundBuildMessages.size(), _inboundBuildMessages.size());
                } else {
                    _inboundBuildMessages.add(new BuildMessageState(receivedMessage, from, fromHash));
                }
            }
            _exec.repoll();
        }
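On the enqueue side the queue becomes bounded by age rather than by an explicit capacity: starting from the newest entry at the tail, anything that has already outlived REQUEST_TIMEOUT is discarded, and if anything had to be discarded the incoming request is dropped too instead of being appended (a tail-drop, recorded under tunnel.dropLoadBacklog). An illustrative admission method in the same spirit, written against the hypothetical RequestQueue sketched earlier (not the actual BuildHandler code):

    /** Tail-drop admission: refuse the new request when the queue is already stale. */
    boolean offer(Request incoming) {
        synchronized (_pending) {
            int dropped = 0;
            long now = System.currentTimeMillis();
            // if even the newest entry is expired, everything older in the queue is expired too
            while (!_pending.isEmpty()
                   && now - ((Request) _pending.get(_pending.size() - 1)).recvTime >= REQUEST_TIMEOUT) {
                _pending.remove(_pending.size() - 1);
                dropped++;
            }
            if (dropped > 0)
                return false;          // backlogged: drop the new arrival instead of queueing it
            _pending.add(incoming);
            return true;
        }
    }

Refusing new arrivals while the queue is stale keeps an overloaded router from accumulating requests it can no longer answer within the requester's timeout.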