DatabaseLookupMessageHandler:
 added stat - netDb.lookupsReceived
 fixed formatting
HandleDatabaseLookupMessage:
 added stat - netDb.lookupsHandled
 added stat - netDb.lookupsMatched
 fixed formatting
HandleDatabaseStoreMessage:
 added stat - netDb.storeHandled
 fixed formatting
StoreJob:
 added stat - netDb.storeSent
 fixed formatting
 removed old unused code (we do dbStore through tunnels, not garlics)
 logging
SearchJob:
 fixed formatting
 logging
HandleTunnelCreateMessageJob:
 fixed formatting
 logging
PoolingTunnelManagerFacade:
 added stat - tunnel.participatingTunnels
 fixed formatting
 logging
TunnelPool:
 added getParticipatingTunnelCount()
 fixed formatting
 logging
StatisticsManager:
 revamped what's published
 fixed formatting
 logging
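
Every stat added above follows the same two-call StatManager pattern, repeated per class in the diffs below: register the rate once in a static initializer, then record one event each time the counted thing happens. A condensed sketch of the pattern (the stat name, description, and averaging periods are copied from this commit; the wrapper class and method are hypothetical):

    import net.i2p.stat.StatManager;

    // Hypothetical wrapper showing the stat pattern used throughout this commit.
    public class LookupCounterExample {
        static {
            // register once, with 5 minute / 1 hour / 1 day averaging periods
            StatManager.getInstance().createRateStat("netDb.lookupsReceived",
                "How many netDb lookups have we received?", "Network Database",
                new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        }

        // call this each time a lookup arrives: one event of value 1, no event duration
        public static void onLookupReceived() {
            StatManager.getInstance().addRateData("netDb.lookupsReceived", 1, 0);
        }
    }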
Author: jrandom
Date: 2004-04-16 23:52:11 +00:00
Committed by: zzz
Parent: 58c145ba08
Commit: 86759d2f9c
9 changed files with 1531 additions and 1568 deletions

StatisticsManager.java

@@ -37,100 +37,134 @@ public class StatisticsManager implements Service {
    public final static int DEFAULT_MAX_PUBLISHED_PEERS = 20;

    public StatisticsManager() {
        _includePeerRankings = false;
    }

    public void shutdown() {}

    public void startup() {
        String val = Router.getInstance().getConfigSetting(PROP_PUBLISH_RANKINGS);
        try {
            if (val == null) {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Peer publishing setting " + PROP_PUBLISH_RANKINGS
                              + " not set - using default " + DEFAULT_PROP_PUBLISH_RANKINGS);
                val = DEFAULT_PROP_PUBLISH_RANKINGS;
            } else {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Peer publishing setting " + PROP_PUBLISH_RANKINGS
                              + " set to " + val);
            }
            boolean v = Boolean.TRUE.toString().equalsIgnoreCase(val);
            _includePeerRankings = v;
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Setting includePeerRankings = " + v);
        } catch (Throwable t) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Error determining whether to publish rankings ["
                           + PROP_PUBLISH_RANKINGS + "=" + val
                           + "], so we're defaulting to FALSE");
            _includePeerRankings = false;
        }
        val = Router.getInstance().getConfigSetting(PROP_MAX_PUBLISHED_PEERS);
        if (val == null) {
            _publishedStats = DEFAULT_MAX_PUBLISHED_PEERS;
        } else {
            try {
                int num = Integer.parseInt(val);
                _publishedStats = num;
            } catch (NumberFormatException nfe) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Invalid max number of peers to publish [" + val
                               + "], defaulting to " + DEFAULT_MAX_PUBLISHED_PEERS, nfe);
                _publishedStats = DEFAULT_MAX_PUBLISHED_PEERS;
            }
        }
    }

    /** Retrieve a snapshot of the statistics that should be published */
    public Properties publishStatistics() {
        Properties stats = new Properties();
        stats.setProperty("router.version", RouterVersion.VERSION);
        stats.setProperty("router.id", RouterVersion.ID);
        stats.setProperty("coreVersion", CoreVersion.VERSION);
        stats.setProperty("core.id", CoreVersion.ID);

        if (_includePeerRankings) {
            stats.putAll(ProfileManager.getInstance().summarizePeers(_publishedStats));
            includeRate("transport.sendProcessingTime", stats, new long[] { 60*1000, 60*60*1000 });
            //includeRate("tcp.queueSize", stats);
            includeRate("jobQueue.jobLag", stats, new long[] { 60*1000, 60*60*1000 });
            includeRate("jobQueue.jobRun", stats, new long[] { 60*1000, 60*60*1000 });
            includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*1000, 60*60*1000 });
            includeRate("jobQueue.readyJobs", stats, new long[] { 60*1000, 60*60*1000 });
            includeRate("jobQueue.droppedJobs", stats, new long[] { 60*60*1000, 24*60*60*1000 });
            includeRate("inNetPool.dropped", stats, new long[] { 60*60*1000, 24*60*60*1000 });
            includeRate("tunnel.participatingTunnels", stats, new long[] { 5*60*1000, 60*60*1000 });
            includeRate("netDb.lookupsReceived", stats, new long[] { 5*60*1000, 60*60*1000 });
            includeRate("netDb.lookupsHandled", stats, new long[] { 5*60*1000, 60*60*1000 });
            includeRate("netDb.lookupsMatched", stats, new long[] { 5*60*1000, 60*60*1000 });
            includeRate("netDb.storeSent", stats, new long[] { 5*60*1000, 60*60*1000 });
            includeRate("netDb.successPeers", stats, new long[] { 60*60*1000 });
            includeRate("transport.receiveMessageSize", stats, new long[] { 5*60*1000, 60*60*1000 });
            includeRate("transport.sendMessageSize", stats, new long[] { 5*60*1000, 60*60*1000 });
            stats.setProperty("stat_uptime", DataHelper.formatDuration(Router.getInstance().getUptime()));
            stats.setProperty("stat__rateKey", "avg;maxAvg;pctLifetime;[sat;satLim;maxSat;maxSatLim;][num;lifetimeFreq;maxFreq]");
            _log.debug("Publishing peer rankings");
        } else {
            _log.debug("Not publishing peer rankings");
        }

        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Building status: " + stats);
        return stats;
    }

    private void includeRate(String rateName, Properties stats) {
        includeRate(rateName, stats, null);
    }

    private void includeRate(String rateName, Properties stats, long selectedPeriods[]) {
        RateStat rate = StatManager.getInstance().getRate(rateName);
        if (rate == null) return;
        long periods[] = rate.getPeriods();
        for (int i = 0; i < periods.length; i++) {
            if (selectedPeriods != null) {
                boolean found = false;
                for (int j = 0; j < selectedPeriods.length; j++) {
                    if (selectedPeriods[j] == periods[i]) {
                        found = true;
                        break;
                    }
                }
                if (!found) continue;
            }

            Rate curRate = rate.getRate(periods[i]);
            if (curRate == null) continue;
            stats.setProperty("stat_" + rateName + '.' + getPeriod(curRate), renderRate(curRate));
        }
    }

    private static String renderRate(Rate rate) {
        StringBuffer buf = new StringBuffer(255);
        buf.append(num(rate.getAverageValue())).append(';');
        buf.append(num(rate.getExtremeAverageValue())).append(';');
        buf.append(pct(rate.getPercentageOfLifetimeValue())).append(';');
        if (rate.getLifetimeTotalEventTime() > 0) {
            buf.append(pct(rate.getLastEventSaturation())).append(';');
            buf.append(num(rate.getLastSaturationLimit())).append(';');
            buf.append(pct(rate.getExtremeEventSaturation())).append(';');
            buf.append(num(rate.getExtremeSaturationLimit())).append(';');
        }
        buf.append(num(rate.getLastEventCount())).append(';');
        long numPeriods = rate.getLifetimePeriods();
        if (numPeriods > 0) {
            double avgFrequency = rate.getLifetimeEventCount() / (double)numPeriods;
            buf.append(num(avgFrequency)).append(';');
            buf.append(num(rate.getExtremeEventCount())).append(';');
        }
        return buf.toString();
    }

    private static String getPeriod(Rate rate) { return DataHelper.formatDuration(rate.getPeriod()); }

@@ -142,6 +176,5 @@ public class StatisticsManager implements Service {
    private final static DecimalFormat _pct = new DecimalFormat("#0.00%", new DecimalFormatSymbols(Locale.UK));
    private final static String pct(double num) { synchronized (_pct) { return _pct.format(num); } }

    public String renderStatusHTML() { return ""; }
}
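
For reference, each rate selected by includeRate() above lands in the published Properties under a key of the form stat_<rateName>.<period>, with the semicolon-delimited value laid out per the stat__rateKey legend. A hypothetical entry, assuming DataHelper.formatDuration() renders a 60-second period as "60s" (the numbers are invented for illustration; the four saturation fields are omitted because this rate tracks no event time):

    stat_jobQueue.jobLag.60s=14.21;302.50;0.35%;8;6.40;31;
    (fields: avg;maxAvg;pctLifetime;num;lifetimeFreq;maxFreq)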

DatabaseLookupMessageHandler.java

@@ -15,14 +15,20 @@ import net.i2p.data.i2np.I2NPMessage;
import net.i2p.data.i2np.SourceRouteBlock;
import net.i2p.router.HandlerJobBuilder;
import net.i2p.router.Job;
import net.i2p.stat.StatManager;

/**
 * Build a HandleDatabaseLookupMessageJob whenever a DatabaseLookupMessage arrives
 *
 */
public class DatabaseLookupMessageHandler implements HandlerJobBuilder {
    static {
        StatManager.getInstance().createRateStat("netDb.lookupsReceived", "How many netDb lookups have we received?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    }

    public Job createJob(I2NPMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) {
        StatManager.getInstance().addRateData("netDb.lookupsReceived", 1, 0);
        // ignore the reply block for the moment
        return new HandleDatabaseLookupMessageJob((DatabaseLookupMessage)receivedMessage, from, fromHash);
    }
}

HandleDatabaseLookupMessageJob.java

@@ -37,6 +37,7 @@ import net.i2p.router.message.SendMessageDirectJob;
import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.stat.StatManager;

/**
 * Handle a lookup for a key received from a remote peer.  Needs to be implemented
@@ -51,138 +52,152 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
    private final static int MAX_ROUTERS_RETURNED = 3;
    private final static int REPLY_TIMEOUT = 60*1000;
    private final static int MESSAGE_PRIORITY = 300;

    static {
        StatManager.getInstance().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
        StatManager.getInstance().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    }

    public HandleDatabaseLookupMessageJob(DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
        _message = receivedMessage;
        _from = from;
        _fromHash = fromHash;
    }

    public void runJob() {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Handling database lookup message for " + _message.getSearchKey());

        Hash fromKey = _message.getFrom().getIdentity().getHash();

        if (_message.getReplyTunnel() != null) {
            if (_log.shouldLog(Log.INFO))
                _log.info("dbLookup received with replies going to " + fromKey
                          + " (tunnel " + _message.getReplyTunnel() + ")");
        }

        NetworkDatabaseFacade.getInstance().store(fromKey, _message.getFrom());

        LeaseSet ls = NetworkDatabaseFacade.getInstance().lookupLeaseSetLocally(_message.getSearchKey());
        if (ls != null) {
            // send that lease set to the _message.getFromHash peer
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("We do have key " + _message.getSearchKey().toBase64()
                           + " locally as a lease set.  sending to " + fromKey.toBase64());
            sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
        } else {
            RouterInfo info = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(_message.getSearchKey());
            if (info != null) {
                // send that routerInfo to the _message.getFromHash peer
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("We do have key " + _message.getSearchKey().toBase64()
                               + " locally as a router info.  sending to " + fromKey.toBase64());
                sendData(_message.getSearchKey(), info, fromKey, _message.getReplyTunnel());
            } else {
                // not found locally - return closest peer routerInfo structs
                Set routerInfoSet = NetworkDatabaseFacade.getInstance().findNearestRouters(_message.getSearchKey(),
                                                                                           MAX_ROUTERS_RETURNED, _message.getDontIncludePeers());
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("We do not have key " + _message.getSearchKey().toBase64() +
                               " locally.  sending back " + routerInfoSet.size() + " peers to " + fromKey.toBase64());
                sendClosest(_message.getSearchKey(), routerInfoSet, fromKey, _message.getReplyTunnel());
            }
        }
    }

    private void sendData(Hash key, DataStructure data, Hash toPeer, TunnelId replyTunnel) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Sending data matching key key " + key.toBase64() + " to peer " + toPeer.toBase64()
                       + " tunnel " + replyTunnel);
        StatManager.getInstance().addRateData("netDb.lookupsMatched", 1, 0);
        DatabaseStoreMessage msg = new DatabaseStoreMessage();
        msg.setKey(key);
        if (data instanceof LeaseSet) {
            msg.setLeaseSet((LeaseSet)data);
            msg.setValueType(DatabaseStoreMessage.KEY_TYPE_LEASESET);
        } else if (data instanceof RouterInfo) {
            msg.setRouterInfo((RouterInfo)data);
            msg.setValueType(DatabaseStoreMessage.KEY_TYPE_ROUTERINFO);
        }
        sendMessage(msg, toPeer, replyTunnel);
    }

    private void sendClosest(Hash key, Set routerInfoSet, Hash toPeer, TunnelId replyTunnel) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Sending closest routers to key " + key.toBase64() + ": # peers = "
                       + routerInfoSet.size() + " tunnel " + replyTunnel);
        DatabaseSearchReplyMessage msg = new DatabaseSearchReplyMessage();
        msg.setFromHash(Router.getInstance().getRouterInfo().getIdentity().getHash());
        msg.setSearchKey(key);
        if (routerInfoSet.size() <= 0) {
            // always include something, so lets toss ourselves in there
            routerInfoSet.add(Router.getInstance().getRouterInfo());
        }
        msg.addReplies(routerInfoSet);
        sendMessage(msg, toPeer, replyTunnel); // should this go via garlic messages instead?
    }

    private void sendMessage(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) {
        StatManager.getInstance().addRateData("netDb.lookupsHandled", 1, 0);
        Job send = null;
        if (replyTunnel != null) {
            sendThroughTunnel(message, toPeer, replyTunnel);
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Sending reply directly to " + toPeer);
            send = new SendMessageDirectJob(message, toPeer, REPLY_TIMEOUT+Clock.getInstance().now(), MESSAGE_PRIORITY);
        }

        NetworkDatabaseFacade.getInstance().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT);
    }

    private void sendThroughTunnel(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) {
        TunnelInfo info = TunnelManagerFacade.getInstance().getTunnelInfo(replyTunnel);

        // the sendTunnelMessageJob can't handle injecting into the tunnel anywhere but the beginning
        // (and if we are the beginning, we have the signing key)
        if ( (info == null) || (info.getSigningKey() != null)) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Sending reply through " + replyTunnel + " on " + toPeer);
            JobQueue.getInstance().addJob(new SendTunnelMessageJob(message, replyTunnel, toPeer, null, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
        } else {
            // its a tunnel we're participating in, but we're NOT the gateway, so
            if (_log.shouldLog(Log.INFO))
                _log.info("Want to reply to a db request via a tunnel, but we're a participant in the reply!  so send it to the gateway");

            if ( (toPeer == null) || (replyTunnel == null) ) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Someone br0ke us.  where is this message supposed to go again?", getAddedBy());
                return;
            }

            long expiration = REPLY_TIMEOUT + Clock.getInstance().now();

            TunnelMessage msg = new TunnelMessage();
            try {
                ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
                message.writeBytes(baos);
                msg.setData(baos.toByteArray());
                msg.setTunnelId(replyTunnel);
                msg.setMessageExpiration(new Date(expiration));
                JobQueue.getInstance().addJob(new SendMessageDirectJob(msg, toPeer, null, null, null, null, expiration, MESSAGE_PRIORITY));

                String bodyType = message.getClass().getName();
                MessageHistory.getInstance().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
            } catch (IOException ioe) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Error writing out the tunnel message to send to the tunnel", ioe);
            } catch (DataFormatException dfe) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Error writing out the tunnel message to send to the tunnel", dfe);
            }
            return;
        }
    }

    public String getName() { return "Handle Database Lookup Message"; }

    public void dropped() {
        MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
    }
}
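
The reply selection in runJob() above reduces to a fixed precedence: an exact lease set match, else an exact router info match, else a referral to the closest known routers (never an empty reply). A toy, self-contained model of that precedence, with strings standing in for the real netDb types:

    import java.util.HashMap;
    import java.util.Map;

    // Toy model of the reply precedence in HandleDatabaseLookupMessageJob.runJob().
    // Not the actual job code: maps stand in for the netDb stores.
    public class LookupPrecedenceExample {
        static final Map<String, String> leaseSets = new HashMap<String, String>();
        static final Map<String, String> routerInfos = new HashMap<String, String>();

        static String reply(String key) {
            if (leaseSets.containsKey(key))   return "DatabaseStoreMessage(leaseSet)";
            if (routerInfos.containsKey(key)) return "DatabaseStoreMessage(routerInfo)";
            return "DatabaseSearchReplyMessage(closest peers)"; // referral, never empty
        }

        public static void main(String[] args) {
            leaseSets.put("a", "ls");
            System.out.println(reply("a")); // exact lease set match
            System.out.println(reply("b")); // closest-peer referral
        }
    }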

HandleDatabaseStoreMessageJob.java

@@ -18,6 +18,7 @@ import net.i2p.router.MessageHistory;
import net.i2p.router.NetworkDatabaseFacade;
import net.i2p.router.ProfileManager;
import net.i2p.util.Log;
import net.i2p.stat.StatManager;

/**
 * Receive DatabaseStoreMessage data and store it in the local net db
@@ -28,38 +29,47 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
    private DatabaseStoreMessage _message;
    private RouterIdentity _from;
    private Hash _fromHash;

    static {
        StatManager.getInstance().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    }

    public HandleDatabaseStoreMessageJob(DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash) {
        _message = receivedMessage;
        _from = from;
        _fromHash = fromHash;
    }

    public void runJob() {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Handling database store message");

        boolean wasNew = false;
        if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
            Object match = NetworkDatabaseFacade.getInstance().store(_message.getKey(), _message.getLeaseSet());
            wasNew = (null == match);
        } else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Handling dbStore of router " + _message.getKey() + " with publishDate of "
                          + new Date(_message.getRouterInfo().getPublished()));
            Object match = NetworkDatabaseFacade.getInstance().store(_message.getKey(), _message.getRouterInfo());
            wasNew = (null == match);
            ProfileManager.getInstance().heardAbout(_message.getKey());
        } else {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType()
                           + ": " + _message);
        }
        if (_from != null)
            _fromHash = _from.getHash();
        if (_fromHash != null)
            ProfileManager.getInstance().dbStoreReceived(_fromHash, wasNew);
        StatManager.getInstance().addRateData("netDb.storeHandled", 1, 0);
    }

    public String getName() { return "Handle Database Store Message"; }

    public void dropped() {
        MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
    }
}
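
The wasNew flag above relies on the store() contract: the facade returns whatever was previously stored under the key, so a null return means the entry is new. A minimal illustration of that convention, with a plain HashMap standing in for the netDb:

    import java.util.HashMap;
    import java.util.Map;

    // Illustration of the "null return means new entry" convention that
    // HandleDatabaseStoreMessageJob uses to compute wasNew. A real netDb store
    // is keyed by Hash and validated; a Map stands in for it here.
    public class WasNewExample {
        private final Map<String, String> db = new HashMap<String, String>();

        public boolean storeAndCheck(String key, String value) {
            Object match = db.put(key, value);   // put() returns the prior value
            return (null == match);              // null prior value -> wasNew
        }

        public static void main(String[] args) {
            WasNewExample ex = new WasNewExample();
            System.out.println(ex.storeAndCheck("k", "v1")); // true  - first store
            System.out.println(ex.storeAndCheck("k", "v2")); // false - replacement
        }
    }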

SearchJob.java

@@ -58,10 +58,10 @@ class SearchJob extends JobImpl {
    private static final long PER_PEER_TIMEOUT = 30*1000;

    static {
        StatManager.getInstance().createRateStat("netDb.successTime", "How long a successful search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
        StatManager.getInstance().createRateStat("netDb.failedTime", "How long a failed search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
        StatManager.getInstance().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
        StatManager.getInstance().createRateStat("netDb.failedPeers", "How many peers are contacted in a failed search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
    }

    /**
@@ -69,21 +69,21 @@ class SearchJob extends JobImpl {
     *
     */
    public SearchJob(KademliaNetworkDatabaseFacade facade, Hash key, Job onSuccess, Job onFailure, long timeoutMs, boolean keepStats, boolean isLease) {
        if ( (key == null) || (key.getData() == null) ) throw new IllegalArgumentException("Search for null key?  wtf");
        _facade = facade;
        _state = new SearchState(key);
        _onSuccess = onSuccess;
        _onFailure = onFailure;
        _timeoutMs = timeoutMs;
        _keepStats = keepStats;
        _isLease = isLease;
        _expiration = Clock.getInstance().now() + timeoutMs;
    }

    public void runJob() {
        if (_log.shouldLog(Log.INFO))
            _log.info(getJobId() + ": Searching for " + _state.getTarget()); // , getAddedBy());
        searchNext();
    }

    protected SearchState getState() { return _state; }
@@ -95,26 +95,27 @@ class SearchJob extends JobImpl {
     * Send the next search, or stop if its completed
     */
    protected void searchNext() {
        if (_state.completed()) {
            if (_log.shouldLog(Log.INFO))
                _log.info(getJobId() + ": Already completed");
            return;
        }
        if (_log.shouldLog(Log.INFO))
            _log.info(getJobId() + ": Searching: " + _state);
        if (isLocal()) {
            if (_log.shouldLog(Log.INFO))
                _log.info(getJobId() + ": Key found locally");
            _state.complete(true);
            succeed();
        } else if (isExpired()) {
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": Key search expired");
            _state.complete(true);
            fail();
        } else {
            //_log.debug("Continuing search");
            continueSearch();
        }
    }

    /**
@@ -124,7 +125,7 @@ class SearchJob extends JobImpl {
    private boolean isLocal() { return _facade.getDataStore().isKnown(_state.getTarget()); }

    private boolean isExpired() {
        return Clock.getInstance().now() >= _expiration;
    }

    /**
@@ -134,58 +135,66 @@ class SearchJob extends JobImpl {
     *
     */
    protected void continueSearch() {
        if (_state.completed()) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug(getJobId() + ": Search already completed", new Exception("already completed"));
            return;
        }
        int toCheck = SEARCH_BREDTH - _state.getPending().size();
        if (toCheck <= 0) {
            // too many already pending
            if (_log.shouldLog(Log.WARN))
                _log.warn(getJobId() + ": Too many searches already pending (pending: "
                          + _state.getPending().size() + " max: " + SEARCH_BREDTH + ")",
                          new Exception("too many pending"));
            requeuePending();
            return;
        }
        List closestHashes = getClosestRouters(_state.getTarget(), toCheck, _state.getAttempted());
        if ( (closestHashes == null) || (closestHashes.size() <= 0) ) {
            if (_state.getPending().size() <= 0) {
                // we tried to find some peers, but there weren't any and no one else is going to answer
                if (_log.shouldLog(Log.WARN))
                    _log.warn(getJobId() + ": No peers left, and none pending!  Already searched: "
                              + _state.getAttempted().size() + " failed: " + _state.getFailed().size(),
                              new Exception("none left"));
                fail();
            } else {
                // no more to try, but we might get data or close peers from some outstanding requests
                if (_log.shouldLog(Log.WARN))
                    _log.warn(getJobId() + ": No peers left, but some are pending!  Pending: "
                              + _state.getPending().size() + " attempted: " + _state.getAttempted().size()
                              + " failed: " + _state.getFailed().size(),
                              new Exception("none left, but pending"));
                requeuePending();
                return;
            }
        } else {
            _state.addPending(closestHashes);
            for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) {
                Hash peer = (Hash)iter.next();
                DataStructure ds = _facade.getDataStore().get(peer);
                if ( (ds == null) || !(ds instanceof RouterInfo) ) {
                    if (_log.shouldLog(Log.WARN))
                        _log.warn(getJobId() + ": Error selecting closest hash that wasnt a router! "
                                  + peer + " : " + ds);
                } else {
                    sendSearch((RouterInfo)ds);
                }
            }
        }
    }

    private void requeuePending() {
        if (_pendingRequeueJob == null)
            _pendingRequeueJob = new JobImpl() {
                public String getName() { return "Requeue search with pending"; }
                public void runJob() { searchNext(); }
            };
        long now = Clock.getInstance().now();
        if (_pendingRequeueJob.getTiming().getStartAfter() < now)
            _pendingRequeueJob.getTiming().setStartAfter(now+5*1000);
        JobQueue.getInstance().addJob(_pendingRequeueJob);
    }

    /**
@@ -195,10 +204,10 @@ class SearchJob extends JobImpl {
     * @return ordered list of Hash objects
     */
    private List getClosestRouters(Hash key, int numClosest, Set alreadyChecked) {
        Hash rkey = RoutingKeyGenerator.getInstance().getRoutingKey(key);
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": Current routing key for " + key + ": " + rkey);
        return PeerSelector.getInstance().selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets());
    }

    /**
@@ -206,20 +215,20 @@ class SearchJob extends JobImpl {
     *
     */
    protected void sendSearch(RouterInfo router) {
        if (router.getIdentity().equals(Router.getInstance().getRouterInfo().getIdentity())) {
            // don't search ourselves
            if (_log.shouldLog(Log.ERROR))
                _log.error(getJobId() + ": Dont send search to ourselves - why did we try?");
            return;
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug(getJobId() + ": Send search to " + router);
        }

        if (_isLease || false) // moo
            sendLeaseSearch(router);
        else
            sendRouterSearch(router);
    }

@@ -229,55 +238,63 @@ class SearchJob extends JobImpl {
     *
     */
    protected void sendLeaseSearch(RouterInfo router) {
        TunnelId inTunnelId = getInboundTunnelId();
        if (inTunnelId == null) {
            _log.error("No tunnels to get search replies through!  wtf!");
            JobQueue.getInstance().addJob(new FailedJob(router));
            return;
        }

        TunnelInfo inTunnel = TunnelManagerFacade.getInstance().getTunnelInfo(inTunnelId);
        RouterInfo inGateway = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(inTunnel.getThisHop());
        if (inGateway == null) {
            _log.error("We can't find the gateway to our inbound tunnel?!  wtf");
            JobQueue.getInstance().addJob(new FailedJob(router));
            return;
        }

        long expiration = Clock.getInstance().now() + PER_PEER_TIMEOUT; // getTimeoutMs();

        DatabaseLookupMessage msg = buildMessage(inTunnelId, inGateway, expiration);

        TunnelId outTunnelId = getOutboundTunnelId();
        if (outTunnelId == null) {
            _log.error("No tunnels to send search out through!  wtf!");
            JobQueue.getInstance().addJob(new FailedJob(router));
            return;
        }

        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": Sending leaseSet search to " + router.getIdentity().getHash().toBase64()
                       + " for " + msg.getSearchKey().toBase64() + " w/ replies through ["
                       + msg.getFrom().getIdentity().getHash().toBase64() + "] via tunnel ["
                       + msg.getReplyTunnel() + "]");

        SearchMessageSelector sel = new SearchMessageSelector(router, _expiration, _state);
        long timeoutMs = PER_PEER_TIMEOUT; // getTimeoutMs();
        SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(router, _state, _facade, this);
        SendTunnelMessageJob j = new SendTunnelMessageJob(msg, outTunnelId, router.getIdentity().getHash(),
                                                          null, null, reply, new FailedJob(router), sel,
                                                          timeoutMs, SEARCH_PRIORITY);
        JobQueue.getInstance().addJob(j);
    }

    /** we're searching for a router, so we can just send direct */
    protected void sendRouterSearch(RouterInfo router) {
        long expiration = Clock.getInstance().now() + PER_PEER_TIMEOUT; // getTimeoutMs();

        DatabaseLookupMessage msg = buildMessage(expiration);

        if (_log.shouldLog(Log.INFO))
            _log.info(getJobId() + ": Sending router search to " + router.getIdentity().getHash().toBase64()
                      + " for " + msg.getSearchKey().toBase64() + " w/ replies to us ["
                      + msg.getFrom().getIdentity().getHash().toBase64() + "]");
        SearchMessageSelector sel = new SearchMessageSelector(router, _expiration, _state);
        long timeoutMs = PER_PEER_TIMEOUT;
        SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(router, _state, _facade, this);
        SendMessageDirectJob j = new SendMessageDirectJob(msg, router.getIdentity().getHash(),
                                                          reply, new FailedJob(router), sel, expiration, SEARCH_PRIORITY);
        JobQueue.getInstance().addJob(j);
    }

    /**
@@ -286,15 +303,15 @@ class SearchJob extends JobImpl {
     * @return tunnel id (or null if none are found)
     */
    private TunnelId getOutboundTunnelId() {
        TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
        crit.setMaximumTunnelsRequired(1);
        crit.setMinimumTunnelsRequired(1);
        List tunnelIds = TunnelManagerFacade.getInstance().selectOutboundTunnelIds(crit);
        if (tunnelIds.size() <= 0) {
            return null;
        }
        return (TunnelId)tunnelIds.get(0);
    }

    /**
@@ -303,14 +320,14 @@ class SearchJob extends JobImpl {
     * @return tunnel id (or null if none are found)
     */
    private TunnelId getInboundTunnelId() {
        TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
        crit.setMaximumTunnelsRequired(1);
        crit.setMinimumTunnelsRequired(1);
        List tunnelIds = TunnelManagerFacade.getInstance().selectInboundTunnelIds(crit);
        if (tunnelIds.size() <= 0) {
            return null;
        }
        return (TunnelId)tunnelIds.get(0);
    }

    /**
@@ -321,13 +338,13 @@ class SearchJob extends JobImpl {
     * @param expiration when the search should stop
     */
    protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, RouterInfo replyGateway, long expiration) {
        DatabaseLookupMessage msg = new DatabaseLookupMessage();
        msg.setSearchKey(_state.getTarget());
        msg.setFrom(replyGateway);
        msg.setDontIncludePeers(_state.getAttempted());
        msg.setMessageExpiration(new Date(expiration));
        msg.setReplyTunnel(replyTunnelId);
        return msg;
    }

    /**
@@ -336,65 +353,69 @@ class SearchJob extends JobImpl {
     *
     */
    protected DatabaseLookupMessage buildMessage(long expiration) {
        DatabaseLookupMessage msg = new DatabaseLookupMessage();
        msg.setSearchKey(_state.getTarget());
        msg.setFrom(Router.getInstance().getRouterInfo());
        msg.setDontIncludePeers(_state.getAttempted());
        msg.setMessageExpiration(new Date(expiration));
        msg.setReplyTunnel(null);
        return msg;
    }

    void replyFound(DatabaseSearchReplyMessage message, Hash peer) {
        long duration = _state.replyFound(peer);
        // this processing can take a while, so split 'er up
        JobQueue.getInstance().addJob(new SearchReplyJob((DatabaseSearchReplyMessage)message, peer, duration));
    }

    private final class SearchReplyJob extends JobImpl {
        private DatabaseSearchReplyMessage _msg;
        private Hash _peer;
        private int _curIndex;
        private int _invalidPeers;
        private int _seenPeers;
        private int _newPeers;
        private int _duplicatePeers;
        private long _duration;
        public SearchReplyJob(DatabaseSearchReplyMessage message, Hash peer, long duration) {
            _msg = message;
            _peer = peer;
            _curIndex = 0;
            _invalidPeers = 0;
            _seenPeers = 0;
            _newPeers = 0;
            _duplicatePeers = 0;
            _duration = duration;
        }
        public String getName() { return "Process Reply for Kademlia Search"; }
        public void runJob() {
            if (_curIndex >= _msg.getNumReplies()) {
                ProfileManager.getInstance().dbLookupReply(_peer, _newPeers, _seenPeers,
                                                           _invalidPeers, _duplicatePeers, _duration);
            } else {
                RouterInfo ri = _msg.getReply(_curIndex);
                if (ri.isValid()) {
                    if (_state.wasAttempted(ri.getIdentity().getHash())) {
                        _duplicatePeers++;
                    }
                    if (_log.shouldLog(Log.INFO))
                        _log.info(getJobId() + ": dbSearchReply received on search containing router "
                                  + ri.getIdentity().getHash() + " with publishDate of "
                                  + new Date(ri.getPublished()));
                    _facade.store(ri.getIdentity().getHash(), ri);
                    if (_facade.getKBuckets().add(ri.getIdentity().getHash()))
                        _newPeers++;
                    else
                        _seenPeers++;
                } else {
                    if (_log.shouldLog(Log.ERROR))
                        _log.error(getJobId() + ": Received an invalid peer from " + _peer + ": "
                                   + ri, new Exception("Invalid peer"));
                    _invalidPeers++;
                }
                _curIndex++;
                requeue(0);
            }
        }
    }

    /**
@@ -403,66 +424,70 @@ class SearchJob extends JobImpl {
     *
     */
    protected class FailedJob extends JobImpl {
        private Hash _peer;
        private boolean _penalizePeer;
        public FailedJob(RouterInfo peer) {
            this(peer, true);
        }
        /**
         * Allow the choice as to whether failed searches should count against
         * the peer (such as if we search for a random key)
         *
         */
        public FailedJob(RouterInfo peer, boolean penalizePeer) {
            super();
            _penalizePeer = penalizePeer;
            _peer = peer.getIdentity().getHash();
        }
        public void runJob() {
            _state.replyTimeout(_peer);
            if (_penalizePeer) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Penalizing peer for timeout on search: " + _peer.toBase64());
                ProfileManager.getInstance().dbLookupFailed(_peer);
            } else {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("NOT (!!) Penalizing peer for timeout on search: " + _peer.toBase64());
            }
            searchNext();
        }
        public String getName() { return "Kademlia Search Failed"; }
    }

    /**
     * Search was totally successful
     */
    protected void succeed() {
        if (_log.shouldLog(Log.INFO))
            _log.info(getJobId() + ": Succeeded search for key " + _state.getTarget());
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": State of successful search: " + _state);

        if (_keepStats) {
            long time = Clock.getInstance().now() - _state.getWhenStarted();
            StatManager.getInstance().addRateData("netDb.successTime", time, 0);
            StatManager.getInstance().addRateData("netDb.successPeers", _state.getAttempted().size(), time);
        }
        if (_onSuccess != null)
            JobQueue.getInstance().addJob(_onSuccess);
    }

    /**
     * Search totally failed
     */
    protected void fail() {
        if (_log.shouldLog(Log.INFO))
            _log.info(getJobId() + ": Failed search for key " + _state.getTarget());
        if (_log.shouldLog(Log.DEBUG))
            _log.debug(getJobId() + ": State of failed search: " + _state);

        if (_keepStats) {
            long time = Clock.getInstance().now() - _state.getWhenStarted();
            StatManager.getInstance().addRateData("netDb.failedTime", time, 0);
            StatManager.getInstance().addRateData("netDb.failedPeers", _state.getAttempted().size(), time);
        }
        if (_onFailure != null)
            JobQueue.getInstance().addJob(_onFailure);
    }

    public String getName() { return "Kademlia NetDb Search"; }
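
Both SearchJob above and StoreJob below bound their fan-out the same way: at most SEARCH_BREDTH (or PARALLELIZATION) requests may be pending at once, and each pass only launches enough new ones to refill that window. A toy, self-contained model of the windowing in continueSearch() (all names hypothetical):

    import java.util.ArrayDeque;
    import java.util.Queue;

    // Toy model of the pending-window logic in SearchJob.continueSearch(): never
    // have more than `breadth` requests outstanding; each tick launches only
    // enough new requests to refill the window. Not I2P code.
    public class SearchWindowExample {
        public static void main(String[] args) {
            final int breadth = 3;                    // cf. SEARCH_BREDTH
            Queue<String> candidates = new ArrayDeque<String>();
            for (int i = 0; i < 7; i++) candidates.add("peer" + i);

            int pending = 0;
            while (!candidates.isEmpty()) {
                int toCheck = breadth - pending;      // room left in the window
                for (int i = 0; i < toCheck && !candidates.isEmpty(); i++) {
                    System.out.println("querying " + candidates.poll());
                    pending++;
                }
                pending--;                            // pretend one reply arrived
            }
        }
    }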

StoreJob.java

@@ -48,6 +48,7 @@ import net.i2p.router.message.SendTunnelMessageJob;
import net.i2p.util.Clock;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.stat.StatManager;

class StoreJob extends JobImpl {
    private final Log _log = new Log(StoreJob.class);

@@ -71,44 +72,50 @@ class StoreJob extends JobImpl {
     */
    private final static int EXPLORATORY_REDUNDANCY = 1;
    private final static int STORE_PRIORITY = 100;

    static {
        StatManager.getInstance().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    }
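    // Usage sketch: rate stats are registered once (as in the static block
    // above, with 5-minute, 1-hour, and 24-hour averaging periods) and then
    // fed per event with addRateData(name, value, eventDuration). The two
    // styles visible in this diff:
    //
    //   // counter-style event: value 1, no duration (see sendStoreThroughTunnel)
    //   StatManager.getInstance().addRateData("netDb.storeSent", 1, 0);
    //
    //   // measurement-style event: a value plus how long it took
    //   // (see netDb.successPeers in SearchJob.succeed())
    //   StatManager.getInstance().addRateData("netDb.successPeers", numPeers, searchTime);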
    /**
     * Create a new search for the routingKey specified
     *
     */
    public StoreJob(KademliaNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) {
        _facade = facade;
        _state = new StoreState(key, data);
        _onSuccess = onSuccess;
        _onFailure = onFailure;
        _timeoutMs = timeoutMs;
        _expiration = Clock.getInstance().now() + timeoutMs;
    }

    public String getName() { return "Kademlia NetDb Store"; }
    public void runJob() {
        sendNext();
    }

    protected boolean isExpired() {
        return Clock.getInstance().now() >= _expiration;
    }

    /**
     * send the key to the next batch of peers
     */
    protected void sendNext() {
        if (_state.completed()) {
            if (_log.shouldLog(Log.INFO))
                _log.info("Already completed");
            return;
        }
        if (isExpired()) {
            _state.complete(true);
            fail();
        } else {
            if (_log.shouldLog(Log.INFO))
                _log.info("Sending: " + _state);
            continueSending();
        }
    }
@@ -118,37 +125,39 @@ class StoreJob extends JobImpl {
    /**
     *
     */
    protected void continueSending() {
        if (_state.completed()) return;
        int toCheck = PARALLELIZATION - _state.getPending().size();
        if (toCheck <= 0) {
            // too many already pending
            return;
        }
        if (toCheck > PARALLELIZATION)
            toCheck = PARALLELIZATION;

        List closestHashes = getClosestRouters(_state.getTarget(), toCheck, _state.getAttempted());
        if ( (closestHashes == null) || (closestHashes.size() <= 0) ) {
            if (_state.getPending().size() <= 0) {
                // we tried to find some peers, but there weren't any and no one else is going to answer
                fail();
            } else {
                // no more to try, but we might get data or close peers from some outstanding requests
                return;
            }
        } else {
            _state.addPending(closestHashes);
            if (_log.shouldLog(Log.INFO))
                _log.info("Continue sending key " + _state.getTarget() + " to " + closestHashes);
            for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) {
                Hash peer = (Hash)iter.next();
                DataStructure ds = _facade.getDataStore().get(peer);
                if ( (ds == null) || !(ds instanceof RouterInfo) ) {
                    if (_log.shouldLog(Log.WARN))
                        _log.warn("Error selecting closest hash that wasnt a router! " + peer + " : " + ds);
                } else {
                    sendStore((RouterInfo)ds);
                }
            }
        }
    }
@@ -160,9 +169,10 @@ class StoreJob extends JobImpl {
    /**
     * @return ordered list of Hash objects
     */
    protected List getClosestRouters(Hash key, int numClosest, Set alreadyChecked) {
        Hash rkey = RoutingKeyGenerator.getInstance().getRoutingKey(key);
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Current routing key for " + key + ": " + rkey);
        return PeerSelector.getInstance().selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets());
    }
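    // For readers unfamiliar with Kademlia closeness: selectNearestExplicit
    // orders candidates by XOR distance to the (rotated) routing key. Below is
    // a toy, self-contained illustration of that ordering; the brute-force
    // sort and all names are stand-ins, not PeerSelector's actual kbucket walk.
    static class XorClosenessDemo {
        static java.math.BigInteger xorDistance(byte[] a, byte[] b) {
            byte[] d = new byte[a.length];
            for (int i = 0; i < a.length; i++)
                d[i] = (byte)(a[i] ^ b[i]);
            return new java.math.BigInteger(1, d); // unsigned magnitude
        }

        /** keep the numClosest candidate keys nearest to target, by XOR metric */
        static java.util.List selectNearest(final byte[] target, java.util.List candidates, int numClosest) {
            java.util.List sorted = new java.util.ArrayList(candidates);
            java.util.Collections.sort(sorted, new java.util.Comparator() {
                public int compare(Object l, Object r) {
                    return xorDistance(target, (byte[])l).compareTo(xorDistance(target, (byte[])r));
                }
            });
            return sorted.subList(0, Math.min(numClosest, sorted.size()));
        }
    }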
@@ -171,250 +181,78 @@ class StoreJob extends JobImpl {
    /**
     *
     */
    protected void sendStore(RouterInfo router) {
        DatabaseStoreMessage msg = new DatabaseStoreMessage();
        msg.setKey(_state.getTarget());
        if (_state.getData() instanceof RouterInfo)
            msg.setRouterInfo((RouterInfo)_state.getData());
        else if (_state.getData() instanceof LeaseSet)
            msg.setLeaseSet((LeaseSet)_state.getData());
        else
            throw new IllegalArgumentException("Storing an unknown data type! " + _state.getData());
        msg.setMessageExpiration(new Date(Clock.getInstance().now() + _timeoutMs));

        if (router.getIdentity().equals(Router.getInstance().getRouterInfo().getIdentity())) {
            // don't send it to ourselves
            if (_log.shouldLog(Log.ERROR))
                _log.error("Dont send store to ourselves - why did we try?");
            return;
        } else {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("Send store to " + router.getIdentity().getHash().toBase64());
        }

        sendStore(msg, router, _expiration);
    }
    protected void sendStore(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
        sendStoreThroughTunnel(msg, peer, expiration);
    }

    protected void sendStoreThroughTunnel(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
        FailedJob fail = new FailedJob(peer);
        Job sent = new OptimisticSendSuccess(peer);
        TunnelInfo info = null;
        TunnelId outboundTunnelId = selectOutboundTunnel();
        if (outboundTunnelId != null)
            info = TunnelManagerFacade.getInstance().getTunnelInfo(outboundTunnelId);
        if (info == null) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("selectOutboundTunnel didn't find a valid tunnel! outboundTunnelId = "
                           + outboundTunnelId + " is not known by the tunnel manager");
            return;
        }
        if (_log.shouldLog(Log.INFO))
            _log.info("Store for " + _state.getTarget() + " expiring on " + new Date(_expiration)
                      + " is going to " + peer.getIdentity().getHash() + " via outbound tunnel: " + info);
        // send it out our outboundTunnelId with instructions for our endpoint to forward it
        // to the router specified (though no particular tunnelId on the target)
        Job j = new SendTunnelMessageJob(msg, outboundTunnelId, peer.getIdentity().getHash(),
                                         null, sent, null, fail, null, _expiration-Clock.getInstance().now(),
                                         STORE_PRIORITY);
        JobQueue.getInstance().addJob(j);
        StatManager.getInstance().addRateData("netDb.storeSent", 1, 0);
    }

    private TunnelId selectOutboundTunnel() {
        TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
        criteria.setAnonymityPriority(80);
        criteria.setLatencyPriority(50);
        criteria.setReliabilityPriority(20);
        criteria.setMaximumTunnelsRequired(1);
        criteria.setMinimumTunnelsRequired(1);
        List tunnelIds = TunnelManagerFacade.getInstance().selectOutboundTunnelIds(criteria);
        if (tunnelIds.size() <= 0) {
            _log.error("No outbound tunnels?!");
            return null;
        } else {
            return (TunnelId)tunnelIds.get(0);
        }
    }
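    // Usage sketch of the criteria API above (weights are illustrative): a
    // caller wanting two low-latency tunnels instead would flip the priorities.
    //
    //   TunnelSelectionCriteria fast = new TunnelSelectionCriteria();
    //   fast.setLatencyPriority(90);
    //   fast.setAnonymityPriority(30);
    //   fast.setReliabilityPriority(40);
    //   fast.setMinimumTunnelsRequired(2);
    //   fast.setMaximumTunnelsRequired(2);
    //   List ids = TunnelManagerFacade.getInstance().selectOutboundTunnelIds(fast);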
    /**
     * Send the store to the peer by way of a garlic and route an ack back to us
     *
     */
    protected void sendStoreAsGarlic(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
        long waitingForId = RandomSource.getInstance().nextInt(Integer.MAX_VALUE);
        GarlicConfig cfg = buildGarlicConfig(msg, peer, waitingForId, expiration);
        FailedJob failedJob = new FailedJob(peer);
        long timeoutMs = expiration - Clock.getInstance().now();
        StoreMessageSelector selector = new StoreMessageSelector(peer, waitingForId);
        SessionKey sentKey = new SessionKey();
        Set sentTags = new HashSet(32);
        PublicKey rcptKey = cfg.getRecipientPublicKey();
        if (rcptKey == null) {
            if (cfg.getRecipient() == null) {
                throw new IllegalArgumentException("Null recipient specified");
            } else if (cfg.getRecipient().getIdentity() == null) {
                throw new IllegalArgumentException("Null recipient.identity specified");
            } else if (cfg.getRecipient().getIdentity().getPublicKey() == null) {
                throw new IllegalArgumentException("Null recipient.identity.publicKey specified");
            } else
                rcptKey = cfg.getRecipient().getIdentity().getPublicKey();
        }
        JobQueue.getInstance().addJob(new SendGarlicJob(cfg, null, failedJob, new UpdateReplyFoundJob(peer, sentKey, sentTags, rcptKey), failedJob, timeoutMs, STORE_PRIORITY, selector, sentKey, sentTags));
    }

    /**
     * Build a garlic containing the data store and an ack to be unwrapped at the
     * target, with the data store sent locally and the ack sent back to us through
     * a random tunnel as a DeliveryStatusMessage containing the ackId
     *
     */
    protected GarlicConfig buildGarlicConfig(I2NPMessage msg, RouterInfo target, long ackId, long expiration) {
        GarlicConfig config = new GarlicConfig();
        PayloadGarlicConfig dataClove = buildDataClove(msg, target, expiration);
        config.addClove(dataClove);
        PayloadGarlicConfig ackClove = buildAckClove(ackId, expiration);
        config.addClove(ackClove);
        DeliveryInstructions instructions = new DeliveryInstructions();
        instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_ROUTER);
        instructions.setDelayRequested(false);
        instructions.setDelaySeconds(0);
        instructions.setEncrypted(false);
        instructions.setEncryptionKey(null);
        instructions.setRouter(target.getIdentity().getHash());
        instructions.setTunnelId(null);
        config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
        config.setDeliveryInstructions(instructions);
        config.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE));
        config.setExpiration(_expiration);
        config.setRecipientPublicKey(target.getIdentity().getPublicKey());
        config.setRecipient(target);
        config.setRequestAck(false);
        return config;
    }

    /**
     * Build a clove that sends a DeliveryStatusMessage to us after tunneling it
     * through a random inbound tunnel
     *
     */
    protected PayloadGarlicConfig buildAckClove(long ackId, long expiration) {
        DeliveryStatusMessage ackMsg = new DeliveryStatusMessage();
        ackMsg.setArrival(new Date(Clock.getInstance().now()));
        ackMsg.setMessageId(ackId);
        ackMsg.setMessageExpiration(new Date(expiration));
        ackMsg.setUniqueId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE));

        PayloadGarlicConfig ackClove = new PayloadGarlicConfig();

        TunnelSelectionCriteria criteria = new TunnelSelectionCriteria();
        criteria.setAnonymityPriority(80);
        criteria.setLatencyPriority(20);
        criteria.setReliabilityPriority(50);
        criteria.setMaximumTunnelsRequired(1);
        criteria.setMinimumTunnelsRequired(1);
        List tunnelIds = TunnelManagerFacade.getInstance().selectInboundTunnelIds(criteria);
        if (tunnelIds.size() <= 0) {
            _log.error("No inbound tunnels exist for a db store ack to come through!");
            return null;
        }

        TunnelId replyToTunnelId = (TunnelId)tunnelIds.get(0); // tunnel id on that gateway
        TunnelInfo info = TunnelManagerFacade.getInstance().getTunnelInfo(replyToTunnelId);
        RouterInfo replyPeer = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(info.getThisHop()); // inbound tunnel gateway
        if (replyPeer == null) {
            _log.error("We don't know how to reach the gateway of our own inbound tunnel?! " + info);
            return null;
        }
        Hash replyToTunnelRouter = replyPeer.getIdentity().getHash();

        DeliveryInstructions ackInstructions = new DeliveryInstructions();
        ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL);
        ackInstructions.setRouter(replyToTunnelRouter);
        ackInstructions.setTunnelId(replyToTunnelId);
        ackInstructions.setDelayRequested(false);
        ackInstructions.setDelaySeconds(0);
        ackInstructions.setEncrypted(false);

        ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
        ackClove.setDeliveryInstructions(ackInstructions);
        ackClove.setExpiration(_expiration);
        ackClove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE));
        ackClove.setPayload(ackMsg);
        ackClove.setRecipient(replyPeer);
        ackClove.setRequestAck(false);

        return ackClove;
    }

    /**
     * Build a clove that sends the data to the target (which is local)
     */
    protected PayloadGarlicConfig buildDataClove(I2NPMessage data, RouterInfo target, long expiration) {
        PayloadGarlicConfig clove = new PayloadGarlicConfig();

        DeliveryInstructions instructions = new DeliveryInstructions();
        instructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_LOCAL);
        instructions.setRouter(target.getIdentity().getHash());
        instructions.setTunnelId(null);
        instructions.setDelayRequested(false);
        instructions.setDelaySeconds(0);
        instructions.setEncrypted(false);

        clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
        clove.setDeliveryInstructions(instructions);
        clove.setExpiration(expiration);
        clove.setId(RandomSource.getInstance().nextInt(Integer.MAX_VALUE));
        clove.setPayload(data);
        clove.setRecipientPublicKey(null);
        clove.setRequestAck(false);

        return clove;
    }
    /**
     * Called after a match to a db store is found (match against a deliveryStatusMessage)
     *
     */
    protected class UpdateReplyFoundJob extends JobImpl implements ReplyJob {
        private I2NPMessage _message;
        private Hash _peer;
        private SessionKey _sentKey;
        private Set _sentTags;
        private PublicKey _toKey;

        public UpdateReplyFoundJob(RouterInfo peer, SessionKey sentKey, Set sentTags, PublicKey toKey) {
            super();
            _peer = peer.getIdentity().getHash();
            _sentKey = sentKey;
            _sentTags = sentTags;
            _toKey = toKey;
        }

        public String getName() { return "Update Reply Found for Kademlia Store"; }

        public void runJob() {
            _log.info("Reply from " + _peer + " with message " + _message);

            if (_message.getType() == DeliveryStatusMessage.MESSAGE_TYPE) {
                long delay = _state.confirmed(_peer);
                ProfileManager.getInstance().dbStoreSent(_peer, delay);

                if ( (_sentKey != null) && (_sentKey.getData() != null) && (_sentTags != null) && (_sentTags.size() > 0) && (_toKey != null) ) {
                    SessionKeyManager.getInstance().tagsDelivered(_toKey, _sentKey, _sentTags);
                    _log.info("Delivered tags successfully to " + _peer + "! # tags: " + _sentTags.size());
                }

                if (_state.getSuccessful().size() >= REDUNDANCY) {
                    succeed();
                } else {
                    sendNext();
                }
            } else {
                _log.error("Selector matched to an UpdateReplyFoundJob with a message that isnt a DeliveryStatusMessage! " + _message);
            }
        }

        public void setMessage(I2NPMessage message) { _message = message; }
    }
@@ -422,25 +260,27 @@ class StoreJob extends JobImpl {
    /**
     * Called after sending a dbStore to a peer successfully without waiting for confirm and
     *
     */
    protected class OptimisticSendSuccess extends JobImpl {
        private Hash _peer;

        public OptimisticSendSuccess(RouterInfo peer) {
            super();
            _peer = peer.getIdentity().getHash();
        }

        public String getName() { return "Optimistic Kademlia Store Send Success"; }

        public void runJob() {
            if (_log.shouldLog(Log.INFO))
                _log.info("Optimistically marking store of " + _state.getTarget()
                          + " to " + _peer + " successful");
            //long howLong = _state.confirmed(_peer);
            //ProfileManager.getInstance().dbStoreSent(_peer, howLong);

            if (_state.getSuccessful().size() >= REDUNDANCY) {
                succeed();
            } else {
                sendNext();
            }
        }
    }
@@ -449,17 +289,17 @@ class StoreJob extends JobImpl {
    /**
     *
     */
    protected class FailedJob extends JobImpl {
        private Hash _peer;

        public FailedJob(RouterInfo peer) {
            super();
            _peer = peer.getIdentity().getHash();
        }
        public void runJob() {
            _state.replyTimeout(_peer);
            ProfileManager.getInstance().dbStoreFailed(_peer);
            sendNext();
        }
        public String getName() { return "Kademlia Store Failed"; }
    }
@@ -468,211 +308,219 @@ class StoreJob extends JobImpl {
    /**
     *
     */
    protected class StoreMessageSelector implements MessageSelector {
        private Hash _peer;
        private long _waitingForId;
        private boolean _found;

        public StoreMessageSelector(RouterInfo peer, long waitingForId) {
            _peer = peer.getIdentity().getHash();
            _found = false;
            _waitingForId = waitingForId;
        }

        public boolean continueMatching() { return !_found; }
        public long getExpiration() { return _expiration; }
        public boolean isMatch(I2NPMessage message) {
            if (_log.shouldLog(Log.DEBUG))
                _log.debug("isMatch("+message.getClass().getName() + ") [want deliveryStatusMessage from "
                           + _peer + " wrt " + _state.getTarget() + "]");
            if (message instanceof DeliveryStatusMessage) {
                DeliveryStatusMessage msg = (DeliveryStatusMessage)message;
                if (msg.getMessageId() == _waitingForId) {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("Found match for the key we're waiting for: " + _waitingForId);
                    _found = true;
                    return true;
                } else {
                    if (_log.shouldLog(Log.DEBUG))
                        _log.debug("DeliveryStatusMessage of a key we're not looking for");
                    return false;
                }
            } else {
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("Not a DeliveryStatusMessage");
                return false;
            }
        }
    }
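    // The MessageSelector contract seen above has three parts: isMatch()
    // inspects each candidate message, continueMatching() says whether to keep
    // watching after a match, and getExpiration() bounds the wait. A
    // hypothetical dispatch loop showing how such a selector is typically
    // driven; the stand-in types below are illustrative, not the router's
    // actual message registry.
    static class SelectorLoopDemo {
        interface Selector {
            boolean isMatch(Object msg);
            boolean continueMatching();
            long getExpiration();
        }

        /** feed inbound messages to a selector until it stops matching or expires */
        static void dispatch(java.util.List inbound, Selector selector, long now) {
            for (java.util.Iterator iter = inbound.iterator(); iter.hasNext(); ) {
                if (!selector.continueMatching()) break;    // found what it wanted
                if (now >= selector.getExpiration()) break; // waited too long
                Object msg = iter.next();
                if (selector.isMatch(msg)) {
                    // hand msg to the selector's owner, e.g. fire a ReplyJob
                }
            }
        }
    }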
    /**
     * Send was totally successful
     */
    protected void succeed() {
        if (_log.shouldLog(Log.INFO))
            _log.info("Succeeded sending key " + _state.getTarget());
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("State of successful send: " + _state);
        if (_onSuccess != null)
            JobQueue.getInstance().addJob(_onSuccess);
        _facade.noteKeySent(_state.getTarget());
    }
    /**
     * Send totally failed
     */
    protected void fail() {
        if (_log.shouldLog(Log.INFO))
            _log.info("Failed sending key " + _state.getTarget());
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("State of failed send: " + _state, new Exception("Who failed me?"));
        if (_onFailure != null)
            JobQueue.getInstance().addJob(_onFailure);
    }
    protected static class StoreState {
        private Hash _key;
        private DataStructure _data;
        private HashSet _pendingPeers;
        private HashMap _pendingPeerTimes;
        private HashSet _successfulPeers;
        private HashSet _successfulExploratoryPeers;
        private HashSet _failedPeers;
        private HashSet _attemptedPeers;
        private volatile long _completed;
        private volatile long _started;

        public StoreState(Hash key, DataStructure data) {
            _key = key;
            _data = data;
            _pendingPeers = new HashSet(16);
            _pendingPeerTimes = new HashMap(16);
            _attemptedPeers = new HashSet(16);
            _failedPeers = new HashSet(16);
            _successfulPeers = new HashSet(16);
            _successfulExploratoryPeers = new HashSet(16);
            _completed = -1;
            _started = Clock.getInstance().now();
        }

        public Hash getTarget() { return _key; }
        public DataStructure getData() { return _data; }
        public Set getPending() {
            synchronized (_pendingPeers) {
                return (Set)_pendingPeers.clone();
            }
        }
        public Set getAttempted() {
            synchronized (_attemptedPeers) {
                return (Set)_attemptedPeers.clone();
            }
        }
        public Set getSuccessful() {
            synchronized (_successfulPeers) {
                return (Set)_successfulPeers.clone();
            }
        }
        public Set getSuccessfulExploratory() {
            synchronized (_successfulExploratoryPeers) {
                return (Set)_successfulExploratoryPeers.clone();
            }
        }
        public Set getFailed() {
            synchronized (_failedPeers) {
                return (Set)_failedPeers.clone();
            }
        }
        public boolean completed() { return _completed != -1; }
        public void complete(boolean completed) {
            if (completed)
                _completed = Clock.getInstance().now();
        }
        public long getWhenStarted() { return _started; }
        public long getWhenCompleted() { return _completed; }

        public void addPending(Collection pending) {
            synchronized (_pendingPeers) {
                _pendingPeers.addAll(pending);
                for (Iterator iter = pending.iterator(); iter.hasNext(); )
                    _pendingPeerTimes.put(iter.next(), new Long(Clock.getInstance().now()));
            }
            synchronized (_attemptedPeers) {
                _attemptedPeers.addAll(pending);
            }
        }

        public long confirmed(Hash peer) {
            long rv = -1;
            synchronized (_pendingPeers) {
                _pendingPeers.remove(peer);
                Long when = (Long)_pendingPeerTimes.remove(peer);
                if (when != null)
                    rv = Clock.getInstance().now() - when.longValue();
            }
            synchronized (_successfulPeers) {
                _successfulPeers.add(peer);
            }
            return rv;
        }

        public long confirmedExploratory(Hash peer) {
            long rv = -1;
            synchronized (_pendingPeers) {
                _pendingPeers.remove(peer);
                Long when = (Long)_pendingPeerTimes.remove(peer);
                if (when != null)
                    rv = Clock.getInstance().now() - when.longValue();
            }
            synchronized (_successfulExploratoryPeers) {
                _successfulExploratoryPeers.add(peer);
            }
            return rv;
        }

        public void replyTimeout(Hash peer) {
            synchronized (_pendingPeers) {
                _pendingPeers.remove(peer);
            }
            synchronized (_failedPeers) {
                _failedPeers.add(peer);
            }
        }

        public String toString() {
            StringBuffer buf = new StringBuffer(256);
            buf.append("Storing ").append(_key);
            buf.append(" ");
            if (_completed <= 0)
                buf.append(" completed? false ");
            else
                buf.append(" completed on ").append(new Date(_completed));
            buf.append(" Attempted: ");
            synchronized (_attemptedPeers) {
                for (Iterator iter = _attemptedPeers.iterator(); iter.hasNext(); ) {
                    Hash peer = (Hash)iter.next();
                    buf.append(peer.toBase64()).append(" ");
                }
            }
            buf.append(" Pending: ");
            synchronized (_pendingPeers) {
                for (Iterator iter = _pendingPeers.iterator(); iter.hasNext(); ) {
                    Hash peer = (Hash)iter.next();
                    buf.append(peer.toBase64()).append(" ");
                }
            }
            buf.append(" Failed: ");
            synchronized (_failedPeers) {
                for (Iterator iter = _failedPeers.iterator(); iter.hasNext(); ) {
                    Hash peer = (Hash)iter.next();
                    buf.append(peer.toBase64()).append(" ");
                }
            }
            buf.append(" Successful: ");
            synchronized (_successfulPeers) {
                for (Iterator iter = _successfulPeers.iterator(); iter.hasNext(); ) {
                    Hash peer = (Hash)iter.next();
                    buf.append(peer.toBase64()).append(" ");
                }
            }
            buf.append(" Successful Exploratory: ");
            synchronized (_successfulExploratoryPeers) {
                for (Iterator iter = _successfulExploratoryPeers.iterator(); iter.hasNext(); ) {
                    Hash peer = (Hash)iter.next();
                    buf.append(peer.toBase64()).append(" ");
                }
            }
            return buf.toString();
        }
    }
}
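// StoreState's getters above never expose the live HashSet: each one clones
// the set while holding its monitor, so a caller can iterate the snapshot
// while other jobs keep mutating the original. A self-contained sketch of the
// idiom with hypothetical names:
class SnapshotSetDemo {
    private final java.util.HashSet _peers = new java.util.HashSet(16);

    public void add(Object peer) {
        synchronized (_peers) {
            _peers.add(peer);
        }
    }

    /** @return a point-in-time copy, never the live set */
    public java.util.Set getPeers() {
        synchronized (_peers) {
            return (java.util.Set)_peers.clone();
        }
    }

    public static void main(String[] args) {
        SnapshotSetDemo demo = new SnapshotSetDemo();
        demo.add("peerA");
        java.util.Set snapshot = demo.getPeers();
        demo.add("peerB"); // does not disturb iteration over the snapshot
        for (java.util.Iterator iter = snapshot.iterator(); iter.hasNext(); )
            System.out.println(iter.next());
    }
}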


@@ -41,118 +41,125 @@ public class HandleTunnelCreateMessageJob extends JobImpl {
    private final static int PRIORITY = 123;

    HandleTunnelCreateMessageJob(TunnelCreateMessage receivedMessage, RouterIdentity from, Hash fromHash, SourceRouteBlock replyBlock) {
        _message = receivedMessage;
        _from = from;
        _fromHash = fromHash;
        _replyBlock = replyBlock;
    }

    public void runJob() {
        if (_log.shouldLog(Log.DEBUG)) _log.debug("Handling tunnel create");
        TunnelInfo info = new TunnelInfo();
        info.setConfigurationKey(_message.getConfigurationKey());
        info.setEncryptionKey(_message.getTunnelKey());
        info.setNextHop(_message.getNextRouter());

        TunnelSettings settings = new TunnelSettings();
        settings.setBytesPerMinuteAverage(_message.getMaxAvgBytesPerMin());
        settings.setBytesPerMinutePeak(_message.getMaxPeakBytesPerMin());
        settings.setMessagesPerMinuteAverage(_message.getMaxAvgMessagesPerMin());
        settings.setMessagesPerMinutePeak(_message.getMaxPeakMessagesPerMin());
        settings.setExpiration(_message.getTunnelDurationSeconds()*1000+Clock.getInstance().now());
        settings.setIncludeDummy(_message.getIncludeDummyTraffic());
        settings.setReorder(_message.getReorderMessages());
        info.setSettings(settings);

        info.setSigningKey(_message.getVerificationPrivateKey());
        info.setThisHop(Router.getInstance().getRouterInfo().getIdentity().getHash());
        info.setTunnelId(_message.getTunnelId());
        info.setVerificationKey(_message.getVerificationPublicKey());
        info.getTunnelId().setType(TunnelId.TYPE_PARTICIPANT);

        if (_message.getNextRouter() == null) {
            if (_log.shouldLog(Log.DEBUG)) _log.debug("We're the endpoint, don't test the \"next\" peer [duh]");
            boolean ok = TunnelManagerFacade.getInstance().joinTunnel(info);
            sendReply(ok);
        } else {
            NetworkDatabaseFacade.getInstance().lookupRouterInfo(info.getNextHop(), new TestJob(info), new JoinJob(info, false), TIMEOUT);
        }
    }
    private class TestJob extends JobImpl {
        private TunnelInfo _target;
        public TestJob(TunnelInfo target) {
            _target = target;
        }
        public String getName() { return "Run a test for peer reachability"; }
        public void runJob() {
            RouterInfo info = NetworkDatabaseFacade.getInstance().lookupRouterInfoLocally(_target.getNextHop());
            if (info == null) {
                if (_log.shouldLog(Log.ERROR))
                    _log.error("Error - unable to look up peer " + _target.toBase64() + ", even though we were queued up via onSuccess??");
                return;
            } else {
                if (_log.shouldLog(Log.INFO))
                    _log.info("Lookup successful for tested peer " + _target.toBase64() + ", now continue with the test");
                Hash peer = Router.getInstance().getRouterInfo().getIdentity().getHash();
                JoinJob success = new JoinJob(_target, true);
                JoinJob failure = new JoinJob(_target, false);
                BuildTestMessageJob test = new BuildTestMessageJob(info, peer, success, failure, TIMEOUT, PRIORITY);
                JobQueue.getInstance().addJob(test);
            }
        }
    }
    private void sendReply(boolean ok) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Sending reply to a tunnel create of id " + _message.getTunnelId()
                       + " with ok (" + ok + ") to router " + _message.getReplyBlock().getRouter().toBase64());
        MessageHistory.getInstance().receiveTunnelCreate(_message.getTunnelId(), _message.getNextRouter(),
                                                         new Date(Clock.getInstance().now() + 1000*_message.getTunnelDurationSeconds()),
                                                         ok, _message.getReplyBlock().getRouter());

        TunnelCreateStatusMessage msg = new TunnelCreateStatusMessage();
        msg.setFromHash(Router.getInstance().getRouterInfo().getIdentity().getHash());
        msg.setTunnelId(_message.getTunnelId());
        if (ok) {
            msg.setStatus(TunnelCreateStatusMessage.STATUS_SUCCESS);
        } else {
            // since we don't actually check anything, this is a catch all
            msg.setStatus(TunnelCreateStatusMessage.STATUS_FAILED_OVERLOADED);
        }
        msg.setMessageExpiration(new Date(Clock.getInstance().now()+60*1000));
        SendReplyMessageJob job = new SendReplyMessageJob(_message.getReplyBlock(), msg, PRIORITY);
        JobQueue.getInstance().addJob(job);
    }

    public String getName() { return "Handle Tunnel Create Message"; }
    private class JoinJob extends JobImpl {
        private TunnelInfo _info;
        private boolean _isReachable;
        public JoinJob(TunnelInfo info, boolean isReachable) {
            _info = info;
            _isReachable = isReachable;
        }

        public void runJob() {
            if (!_isReachable) {
                long before = Clock.getInstance().now();
                sendReply(false);
                long after = Clock.getInstance().now();
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("JoinJob .refuse took " + (after-before) + "ms to refuse " + _info);
            } else {
                long before = Clock.getInstance().now();
                boolean ok = TunnelManagerFacade.getInstance().joinTunnel(_info);
                long afterJoin = Clock.getInstance().now();
                sendReply(ok);
                long after = Clock.getInstance().now();
                if (_log.shouldLog(Log.DEBUG))
                    _log.debug("JoinJob .joinTunnel took " + (afterJoin-before) + "ms and sendReply took " + (after-afterJoin) + "ms");
            }
        }
        public String getName() { return "Process the tunnel join after testing the nextHop"; }
    }

    public void dropped() {
        MessageHistory.getInstance().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
    }
}


@@ -28,26 +28,27 @@ public class PoolingTunnelManagerFacade extends TunnelManagerFacade {
    private TunnelTestManager _testManager;

    static {
        StatManager.getInstance().createFrequencyStat("tunnel.acceptRequestFrequency", "How often do we accept requests to join a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        StatManager.getInstance().createFrequencyStat("tunnel.rejectRequestFrequency", "How often do we reject requests to join a tunnel?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        StatManager.getInstance().createRateStat("tunnel.participatingTunnels", "How many tunnels are we participating in?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    }

    public PoolingTunnelManagerFacade() {
        super();
        InNetMessagePool.getInstance().registerHandlerJobBuilder(TunnelCreateMessage.MESSAGE_TYPE, new TunnelCreateMessageHandler());
    }

    public void startup() {
        if (_pool == null)
            _pool = new TunnelPool();
        _pool.startup();
        _testManager = new TunnelTestManager(_pool);
    }

    public void shutdown() {
        _pool.shutdown();
        _testManager.stopTesting();
        _testManager = null;
    }
@@ -56,54 +57,62 @@ public class PoolingTunnelManagerFacade extends TunnelManagerFacade {
    /**
     * @return true if the router will accept participation, else false.
     */
    public boolean joinTunnel(TunnelInfo info) {
        if (info == null) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Null tunnel", new Exception("Null tunnel"));
            StatManager.getInstance().updateFrequency("tunnel.rejectRequestFrequency");
            return false;
        }
        if (info.getSettings() == null) {
            if (_log.shouldLog(Log.ERROR))
                _log.error("Null settings!", new Exception("settings are null"));
            StatManager.getInstance().updateFrequency("tunnel.rejectRequestFrequency");
            return false;
        }
        if (info.getSettings().getExpiration() == 0) {
            if (_log.shouldLog(Log.INFO))
                _log.info("No expiration for tunnel " + info.getTunnelId().getTunnelId(),
                          new Exception("No expiration"));
            StatManager.getInstance().updateFrequency("tunnel.rejectRequestFrequency");
            return false;
        } else {
            if (info.getSettings().getExpiration() < Clock.getInstance().now()) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Already expired - " + new Date(info.getSettings().getExpiration()),
                              new Exception("Already expired"));
                StatManager.getInstance().updateFrequency("tunnel.rejectRequestFrequency");
                return false;
            }
        }

        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Joining tunnel: " + info);
        boolean ok = _pool.addParticipatingTunnel(info);
        if (!ok)
            StatManager.getInstance().updateFrequency("tunnel.rejectRequestFrequency");
        else
            StatManager.getInstance().updateFrequency("tunnel.acceptRequestFrequency");
        StatManager.getInstance().addRateData("tunnel.participatingTunnels", _pool.getParticipatingTunnelCount(), 0);
        return ok;
    }
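    // getParticipatingTunnelCount() itself is not shown in this excerpt; a
    // plausible reconstruction, assuming it mirrors the synchronized-map
    // pattern of TunnelPool.getOutboundTunnelCount() further down in this diff:
    //
    //   public int getParticipatingTunnelCount() {
    //       if (!_isLive) return 0;
    //       synchronized (_participatingTunnels) {
    //           return _participatingTunnels.size();
    //       }
    //   }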
    /**
     * Retrieve the information related to a particular tunnel
     *
     */
    public TunnelInfo getTunnelInfo(TunnelId id) {
        return _pool.getTunnelInfo(id);
    }

    /**
     * Retrieve a set of tunnels from the existing ones for various purposes
     */
    public List selectOutboundTunnelIds(TunnelSelectionCriteria criteria) {
        return PoolingTunnelSelector.selectOutboundTunnelIds(_pool, criteria);
    }

    /**
     * Retrieve a set of tunnels from the existing ones for various purposes
     */
    public List selectInboundTunnelIds(TunnelSelectionCriteria criteria) {
        return PoolingTunnelSelector.selectInboundTunnelIds(_pool, criteria);
    }
@@ -113,12 +122,12 @@ public class PoolingTunnelManagerFacade extends TunnelManagerFacade {
    /**
     *
     */
    public void createTunnels(Destination destination, ClientTunnelSettings clientSettings, long timeoutMs) {
        ClientTunnelPool pool = _pool.getClientPool(destination);
        if (pool != null) {
            pool.setClientSettings(clientSettings);
        } else {
            _pool.createClientPool(destination, clientSettings);
        }
    }
@@ -127,32 +136,34 @@ public class PoolingTunnelManagerFacade extends TunnelManagerFacade {
    /**
     *
     */
    public void peerFailed(Hash peer) {
        int numFailed = 0;
        for (Iterator iter = _pool.getManagedTunnelIds().iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = (TunnelInfo)_pool.getTunnelInfo(id);
            if (isParticipant(info, peer)) {
                _log.info("Peer " + peer.toBase64() + " failed and they participate in tunnel "
                          + id.getTunnelId() + ". Marking the tunnel as not ready!");
                info.setIsReady(false);
                numFailed++;

                long lifetime = Clock.getInstance().now() - info.getCreated();
                StatManager.getInstance().addRateData("tunnel.failAfterTime", lifetime, lifetime);
            }
        }
        if (_log.shouldLog(Log.INFO))
            _log.info("On peer " + peer.toBase64() + " failure, " + numFailed + " tunnels were killed");
    }
    private boolean isParticipant(TunnelInfo info, Hash peer) {
        if ( (info == null) || (peer == null) ) return false;
        TunnelInfo cur = info;
        while (cur != null) {
            if (peer.equals(cur.getThisHop())) return true;
            if (peer.equals(cur.getNextHop())) return true;
            cur = cur.getNextHopInfo();
        }
        return false;
    }
@@ -160,32 +171,32 @@ public class PoolingTunnelManagerFacade extends TunnelManagerFacade {
    /**
     *
     */
    public boolean isInUse(Hash peer) {
        if (isInUse(peer, _pool.getManagedTunnelIds())) {
            if (_log.shouldLog(Log.INFO))
                _log.debug("Peer is in a managed tunnel: " + peer.toBase64());
            return true;
        }
        if (isInUse(peer, _pool.getPendingTunnels())) {
            if (_log.shouldLog(Log.INFO))
                _log.debug("Peer is in a pending tunnel: " + peer.toBase64());
            return true;
        }
        if (isInUse(peer, _pool.getParticipatingTunnels())) {
            if (_log.shouldLog(Log.INFO))
                _log.debug("Peer is in a participating tunnel: " + peer.toBase64());
            return true;
        }
        return false;
    }

    private boolean isInUse(Hash peer, Set tunnelIds) {
        for (Iterator iter = tunnelIds.iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = _pool.getTunnelInfo(id);
            if (isParticipant(info, peer))
                return true;
        }
        return false;
    }
@@ -193,9 +204,9 @@ public class PoolingTunnelManagerFacade extends TunnelManagerFacade {
    /**
     *
     */
    public String renderStatusHTML() {
        if (_pool != null)
            return _pool.renderStatusHTML();
        else
            return "<h2>Tunnel Manager not initialized</h2>\n";
    }
}


@@ -48,8 +48,8 @@ class TunnelPool {
    /** active or has it been shutdown? */
    private boolean _isLive;

    /** write out the current state every 60 seconds */
    private final static long WRITE_POOL_DELAY = 60*1000;

    /** allow the tunnel create timeout to be overridden, default is 60 seconds [but really slow computers should be larger] */
    public final static String TUNNEL_CREATION_TIMEOUT_PARAM = "tunnel.creationTimeoutMs";
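    // TUNNEL_CREATION_TIMEOUT_PARAM names a router.config key, e.g.
    // "tunnel.creationTimeoutMs=120000". A sketch of reading the override
    // with the Router.getConfigSetting() accessor this codebase uses
    // elsewhere; the helper and its fallback are illustrative, not part of
    // the commit:
    //
    //   static long getTunnelCreationTimeoutMs() {
    //       String val = Router.getInstance().getConfigSetting(TUNNEL_CREATION_TIMEOUT_PARAM);
    //       if (val == null) return 60*1000; // documented default
    //       try {
    //           return Long.parseLong(val);
    //       } catch (NumberFormatException nfe) {
    //           return 60*1000;
    //       }
    //   }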
@@ -59,13 +59,13 @@ class TunnelPool {
    public final static int TARGET_CLIENTS_DEFAULT = 3;

    static {
        StatManager.getInstance().createFrequencyStat("tunnel.failFrequency", "How often do tunnels prematurely fail (after being successfully built)?", "Tunnels", new long[] { 60*1000l, 60*60*1000l, 24*60*60*1000l });
        StatManager.getInstance().createRateStat("tunnel.failAfterTime", "How long do tunnels that fail prematurely last before failing?", "Tunnels", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    }

    public TunnelPool() {
        _isLive = true;
        _persistenceHelper = new TunnelPoolPersistenceHelper();
    }
@@ -73,45 +73,45 @@ class TunnelPool {
    /**
     *
     */
    public TunnelInfo getTunnelInfo(TunnelId id) {
        if (!_isLive) return null;
        if (id == null) return null;
        boolean typeKnown = id.getType() != TunnelId.TYPE_UNSPECIFIED;

        if ( (!typeKnown) || (id.getType() == TunnelId.TYPE_PARTICIPANT) ) {
            synchronized (_participatingTunnels) {
                if (_participatingTunnels.containsKey(id))
                    return (TunnelInfo)_participatingTunnels.get(id);
            }
        }
        if ( (!typeKnown) || (id.getType() == TunnelId.TYPE_OUTBOUND) ) {
            synchronized (_outboundTunnels) {
                if (_outboundTunnels.containsKey(id))
                    return (TunnelInfo)_outboundTunnels.get(id);
            }
        }
        if ( (!typeKnown) || (id.getType() == TunnelId.TYPE_INBOUND) ) {
            synchronized (_freeInboundTunnels) {
                if (_freeInboundTunnels.containsKey(id))
                    return (TunnelInfo)_freeInboundTunnels.get(id);
            }
        }
        synchronized (_pendingTunnels) {
            if (_pendingTunnels.containsKey(id))
                return (TunnelInfo)_pendingTunnels.get(id);
        }
        if ( (!typeKnown) || (id.getType() == TunnelId.TYPE_INBOUND) ) {
            synchronized (_clientPools) {
                for (Iterator iter = _clientPools.values().iterator(); iter.hasNext(); ) {
                    ClientTunnelPool pool = (ClientTunnelPool)iter.next();
                    if (pool.isInboundTunnel(id))
                        return pool.getInboundTunnel(id);
                    else if (pool.isInactiveInboundTunnel(id))
                        return pool.getInactiveInboundTunnel(id);
                }
            }
        }
        return null;
    }
@@ -120,21 +120,21 @@ class TunnelPool {
    /**
     *
     */
    public Set getManagedTunnelIds() {
        if (!_isLive) return null;
        Set ids = new HashSet(64);
        synchronized (_outboundTunnels) {
            ids.addAll(_outboundTunnels.keySet());
        }
        synchronized (_freeInboundTunnels) {
            ids.addAll(_freeInboundTunnels.keySet());
        }
        synchronized (_clientPools) {
            for (Iterator iter = _clientPools.values().iterator(); iter.hasNext(); ) {
                ClientTunnelPool pool = (ClientTunnelPool)iter.next();
                ids.addAll(pool.getInboundTunnelIds());
            }
        }
        return ids;
    }
/** /**
@ -143,156 +143,164 @@ class TunnelPool {
* @return true if the tunnel was allocated successfully, false if an error occurred * @return true if the tunnel was allocated successfully, false if an error occurred
*/ */
public boolean allocateTunnel(TunnelId id, Destination dest) { public boolean allocateTunnel(TunnelId id, Destination dest) {
if (!_isLive) return false; if (!_isLive) return false;
ClientTunnelPool pool = getClientPool(dest); ClientTunnelPool pool = getClientPool(dest);
if (pool == null) { if (pool == null) {
_log.error("Error allocating tunnel " + id + " to " + dest + ": no pool for the client known"); if (_log.shouldLog(Log.ERROR))
return false; _log.error("Error allocating tunnel " + id + " to " + dest + ": no pool for the client known");
} return false;
TunnelInfo tunnel = removeFreeTunnel(id); }
if (tunnel == null) { TunnelInfo tunnel = removeFreeTunnel(id);
_log.error("Error allocating tunnel " + id + " to " + dest + ": tunnel is no longer free?"); if (tunnel == null) {
return false; if (_log.shouldLog(Log.ERROR))
} _log.error("Error allocating tunnel " + id + " to " + dest + ": tunnel is no longer free?");
return false;
TunnelInfo t = tunnel; }
while (t != null) {
t.setDestination(dest); TunnelInfo t = tunnel;
t = t.getNextHopInfo(); while (t != null) {
} t.setDestination(dest);
t = t.getNextHopInfo();
pool.addInboundTunnel(tunnel); }
return true;
pool.addInboundTunnel(tunnel);
return true;
} }
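The while loop above stamps the destination on every hop record in the chain, not just the gateway, so a lookup through any hop's TunnelInfo reports the allocation. A hypothetical call site, assuming illustrative names:

    // Hypothetical caller - claim a specific free inbound tunnel for a client,
    // falling back when the claim races with expiration or another consumer.
    void claimTunnel(TunnelPool pool, TunnelId id, Destination dest) {
        if (!pool.allocateTunnel(id, dest)) {
            // the tunnel was taken or expired in the meantime; a real caller
            // would request a fresh inbound tunnel for dest here
        }
    }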
    /**
     * Set of tunnelIds for outbound tunnels
     */
    public Set getOutboundTunnels() {
        if (!_isLive) return null;
        synchronized (_outboundTunnels) {
            return new HashSet(_outboundTunnels.keySet());
        }
    }
    public int getOutboundTunnelCount() {
        if (!_isLive) return 0;
        synchronized (_outboundTunnels) {
            return _outboundTunnels.size();
        }
    }
    public TunnelInfo getOutboundTunnel(TunnelId id) {
        if (!_isLive) return null;
        synchronized (_outboundTunnels) {
            return (TunnelInfo)_outboundTunnels.get(id);
        }
    }
    public void addOutboundTunnel(TunnelInfo tunnel) {
        if (!_isLive) return;
        if (_log.shouldLog(Log.DEBUG)) _log.debug("Add outbound tunnel " + tunnel.getTunnelId());
        MessageHistory.getInstance().tunnelJoined("outbound", tunnel);
        synchronized (_outboundTunnels) {
            _outboundTunnels.put(tunnel.getTunnelId(), tunnel);
        }
        synchronized (_pendingTunnels) {
            _pendingTunnels.remove(tunnel.getTunnelId());
        }
    }
    public void removeOutboundTunnel(TunnelId id) {
        if (!_isLive) return;
        if (_log.shouldLog(Log.DEBUG)) _log.debug("Removing outbound tunnel " + id);
        int remaining = 0;
        synchronized (_outboundTunnels) {
            _outboundTunnels.remove(id);
            remaining = _outboundTunnels.size();
        }
        if (remaining <= 0) {
            buildFakeTunnels();
        }
    }
    /**
     * Set of tunnelIds that this router has available for consumption
     */
    public Set getFreeTunnels() {
        if (!_isLive) return null;
        synchronized (_freeInboundTunnels) {
            return new HashSet(_freeInboundTunnels.keySet());
        }
    }
    public int getFreeTunnelCount() {
        if (!_isLive) return 0;
        synchronized (_freeInboundTunnels) {
            return _freeInboundTunnels.size();
        }
    }
    public TunnelInfo getFreeTunnel(TunnelId id) {
        if (!_isLive) return null;
        synchronized (_freeInboundTunnels) {
            return (TunnelInfo)_freeInboundTunnels.get(id);
        }
    }
    public void addFreeTunnel(TunnelInfo tunnel) {
        if (!_isLive) return;
        if (_log.shouldLog(Log.DEBUG)) _log.debug("Add free inbound tunnel " + tunnel.getTunnelId());
        MessageHistory.getInstance().tunnelJoined("free inbound", tunnel);
        synchronized (_freeInboundTunnels) {
            _freeInboundTunnels.put(tunnel.getTunnelId(), tunnel);
        }
        synchronized (_pendingTunnels) {
            _pendingTunnels.remove(tunnel.getTunnelId());
        }
    }
    public TunnelInfo removeFreeTunnel(TunnelId id) {
        if (!_isLive) return null;
        if (_log.shouldLog(Log.DEBUG)) _log.debug("Removing free inbound tunnel " + id);
        int remaining = 0;
        TunnelInfo rv = null;
        synchronized (_freeInboundTunnels) {
            rv = (TunnelInfo)_freeInboundTunnels.remove(id);
            remaining = _freeInboundTunnels.size();
        }
        if (remaining <= 0)
            buildFakeTunnels();
        return rv;
    }
    /**
     * set of tunnelIds that this router is participating in (but not managing)
     */
    public Set getParticipatingTunnels() {
        if (!_isLive) return null;
        synchronized (_participatingTunnels) {
            return new HashSet(_participatingTunnels.keySet());
        }
    }
    public int getParticipatingTunnelCount() {
        if (!_isLive) return 0;
        synchronized (_participatingTunnels) {
            return _participatingTunnels.size();
        }
    }
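getParticipatingTunnelCount() is the new accessor backing the tunnel.participatingTunnels stat this commit adds. A minimal sketch of how a publisher could feed it; the actual call site lives in PoolingTunnelManagerFacade (not shown here), so treat the method and names as illustrative:

    // Illustrative only - sample the pool and record the count as rate data.
    void publishParticipatingCount(TunnelPool pool) {
        int participating = pool.getParticipatingTunnelCount();
        StatManager.getInstance().addRateData("tunnel.participatingTunnels", participating, 0);
    }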
    public TunnelInfo getParticipatingTunnel(TunnelId id) {
        if (!_isLive) return null;
        synchronized (_participatingTunnels) {
            return (TunnelInfo)_participatingTunnels.get(id);
        }
    }
    public boolean addParticipatingTunnel(TunnelInfo tunnel) {
        if (!_isLive) return false;
        if (_log.shouldLog(Log.DEBUG)) _log.debug("Add participating tunnel " + tunnel.getTunnelId());
        MessageHistory.getInstance().tunnelJoined("participant", tunnel);
        synchronized (_participatingTunnels) {
            if (_participatingTunnels.containsKey(tunnel.getTunnelId())) {
                return false;
            } else {
                _participatingTunnels.put(tunnel.getTunnelId(), tunnel);
                tunnel.setIsReady(true);
                return true;
            }
        }
    }
    public TunnelInfo removeParticipatingTunnel(TunnelId id) {
        if (!_isLive) return null;
        if (_log.shouldLog(Log.DEBUG)) _log.debug("Removing participating tunnel " + id);
        synchronized (_participatingTunnels) {
            return (TunnelInfo)_participatingTunnels.remove(id);
        }
    }
    /**
@@ -300,10 +308,10 @@ class TunnelPool {
     *
     */
    public Set getClientPools() {
        if (!_isLive) return null;
        synchronized (_clientPools) {
            return new HashSet(_clientPools.keySet());
        }
    }
    /**
@@ -311,78 +319,78 @@ class TunnelPool {
     *
     */
    public void createClientPool(Destination dest, ClientTunnelSettings settings) {
        if (!_isLive) return;
        ClientTunnelPool pool = null;
        synchronized (_clientPools) {
            if (_clientPools.containsKey(dest)) {
                pool = (ClientTunnelPool)_clientPools.get(dest);
                if (_log.shouldLog(Log.INFO))
                    _log.info("Reusing an existing client tunnel pool for " + dest.calculateHash());
            } else {
                pool = new ClientTunnelPool(dest, settings, this);
                if (_log.shouldLog(Log.INFO))
                    _log.info("New client tunnel pool created for " + dest.calculateHash());
                _clientPools.put(dest, pool);
            }
        }
        pool.startPool();
    }
    ClientTunnelPool addClientPool(ClientTunnelPool pool) {
        if (!_isLive) return null;
        ClientTunnelPool old = null;
        if (_log.shouldLog(Log.INFO))
            _log.info("Client tunnel pool added for " + pool.getDestination().calculateHash());
        synchronized (_clientPools) {
            old = (ClientTunnelPool)_clientPools.put(pool.getDestination(), pool);
        }
        return old;
    }
    public ClientTunnelPool getClientPool(Destination dest) {
        if (!_isLive) return null;
        synchronized (_clientPools) {
            return (ClientTunnelPool)_clientPools.get(dest);
        }
    }
    public void removeClientPool(Destination dest) {
        if (!_isLive) return;
        if (_log.shouldLog(Log.DEBUG)) _log.debug("Removing client tunnel pool for " + dest.calculateHash());
        ClientTunnelPool pool = null;
        synchronized (_clientPools) {
            pool = (ClientTunnelPool)_clientPools.remove(dest);
        }
        if (pool != null)
            pool.stopPool();
    }
    public Set getPendingTunnels() {
        if (!_isLive) return null;
        synchronized (_pendingTunnels) {
            return new HashSet(_pendingTunnels.keySet());
        }
    }
    public TunnelInfo getPendingTunnel(TunnelId id) {
        if (!_isLive) return null;
        synchronized (_pendingTunnels) {
            return (TunnelInfo)_pendingTunnels.get(id);
        }
    }
    public void addPendingTunnel(TunnelInfo info) {
        if (!_isLive) return;
        MessageHistory.getInstance().tunnelJoined("pending", info);
        synchronized (_pendingTunnels) {
            _pendingTunnels.put(info.getTunnelId(), info);
        }
    }
    public void removePendingTunnel(TunnelId id) {
        if (!_isLive) return;
        if (_log.shouldLog(Log.DEBUG)) _log.debug("Removing pending tunnel " + id);
        synchronized (_pendingTunnels) {
            _pendingTunnels.remove(id);
        }
    }
    /** fetch the settings for the pool (tunnel settings and quantities) */
@@ -399,16 +407,16 @@ class TunnelPool {
    /** determine the number of hops in the longest tunnel we have */
    public int getLongestTunnelLength() {
        int max = 0;
        synchronized (_freeInboundTunnels) {
            for (Iterator iter = _freeInboundTunnels.values().iterator(); iter.hasNext(); ) {
                TunnelInfo info = (TunnelInfo)iter.next();
                int len = info.getLength();
                if (len > max)
                    max = len;
            }
        }
        return max;
    }
    /**
@@ -418,225 +426,225 @@ class TunnelPool {
     *
     */
    public void buildFakeTunnels() {
        if (getFreeValidTunnelCount() < 3) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Running low on valid inbound tunnels, building another");
            TunnelInfo inTunnelGateway = TunnelBuilder.getInstance().configureInboundTunnel(null, getPoolSettings(), true);
            RequestTunnelJob inReqJob = new RequestTunnelJob(this, inTunnelGateway, true, getTunnelCreationTimeout());
            inReqJob.runJob();
        }
        if (getOutboundValidTunnelCount() < 3) {
            if (_log.shouldLog(Log.WARN))
                _log.warn("Running low on valid outbound tunnels, building another");
            TunnelInfo outTunnelGateway = TunnelBuilder.getInstance().configureOutboundTunnel(getPoolSettings(), true);
            RequestTunnelJob outReqJob = new RequestTunnelJob(this, outTunnelGateway, false, getTunnelCreationTimeout());
            outReqJob.runJob();
        }
    }
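buildFakeTunnels() replenishes each direction whenever fewer than three valid tunnels remain. A hypothetical tidy-up (not part of this commit) would name that threshold instead of repeating the literal 3 in both branches:

    // Hypothetical refactoring - a named constant for the replenishment floor.
    private static final int MIN_READY_TUNNELS = 3;

    private boolean lowOnInbound()  { return getFreeValidTunnelCount() < MIN_READY_TUNNELS; }
    private boolean lowOnOutbound() { return getOutboundValidTunnelCount() < MIN_READY_TUNNELS; }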
    private int getFreeValidTunnelCount() {
        int found = 0;
        Set ids = getFreeTunnels();
        long mustExpireAfter = Clock.getInstance().now();
        for (Iterator iter = ids.iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = getFreeTunnel(id);
            if ( (info != null) && (info.getIsReady()) ) {
                if (info.getSettings().getExpiration() > mustExpireAfter) {
                    if (info.getDestination() == null) {
                        found++;
                    }
                }
            }
        }
        return found;
    }
    private int getOutboundValidTunnelCount() {
        int found = 0;
        Set ids = getOutboundTunnels();
        long mustExpireAfter = Clock.getInstance().now();
        for (Iterator iter = ids.iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo info = getOutboundTunnel(id);
            if ( (info != null) && (info.getIsReady()) ) {
                if (info.getSettings().getExpiration() > mustExpireAfter) {
                    found++;
                }
            }
        }
        return found;
    }
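Both counters apply the same "ready and not yet expired" test; the free-tunnel variant additionally requires the tunnel to be unallocated (getDestination() == null). A hypothetical shared predicate for the common part, purely as a sketch:

    // Hypothetical helper consolidating the validity test used in both
    // counters above; "usable" means ready and expiring after `now`.
    private static boolean isUsable(TunnelInfo info, long now) {
        return (info != null)
            && info.getIsReady()
            && (info.getSettings().getExpiration() > now);
    }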
    public void tunnelFailed(TunnelId id) {
        if (!_isLive) return;
        if (_log.shouldLog(Log.INFO))
            _log.info("Tunnel " + id + " marked as not ready, since it /failed/", new Exception("Failed tunnel"));
        TunnelInfo info = getTunnelInfo(id);
        if (info == null)
            return;
        MessageHistory.getInstance().tunnelFailed(info.getTunnelId());
        info.setIsReady(false);
        Hash us = Router.getInstance().getRouterInfo().getIdentity().getHash();
        long lifetime = Clock.getInstance().now() - info.getCreated();
        while (info != null) {
            if (!info.getThisHop().equals(us)) {
                ProfileManager.getInstance().tunnelFailed(info.getThisHop());
            }
            info = info.getNextHopInfo();
        }
        StatManager.getInstance().addRateData("tunnel.failAfterTime", lifetime, lifetime);
        StatManager.getInstance().updateFrequency("tunnel.failFrequency");
        buildFakeTunnels();
    }
    public void startup() {
        if (_log.shouldLog(Log.INFO)) _log.info("Starting up tunnel pool");
        _isLive = true;
        _outboundTunnels = new HashMap(16);
        _freeInboundTunnels = new HashMap(16);
        _clientPools = new HashMap(8);
        _participatingTunnels = new HashMap(64);
        _pendingTunnels = new HashMap(8);
        _poolSettings = createPoolSettings();
        _persistenceHelper.loadPool(this);
        _tunnelCreationTimeout = -1;
        try {
            String str = Router.getInstance().getConfigSetting(TUNNEL_CREATION_TIMEOUT_PARAM);
            _tunnelCreationTimeout = Long.parseLong(str);
        } catch (Throwable t) {
            _tunnelCreationTimeout = TUNNEL_CREATION_TIMEOUT_DEFAULT;
        }
        _targetClients = TARGET_CLIENTS_DEFAULT;
        try {
            String str = Router.getInstance().getConfigSetting(TARGET_CLIENTS_PARAM);
            _targetClients = Integer.parseInt(str);
        } catch (Throwable t) {
            _targetClients = TARGET_CLIENTS_DEFAULT;
        }
        buildFakeTunnels();
        JobQueue.getInstance().addJob(new WritePoolJob());
        JobQueue.getInstance().addJob(new TunnelPoolManagerJob(this));
        JobQueue.getInstance().addJob(new TunnelPoolExpirationJob(this));
    }
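startup() parses two config settings with the same try/parse/fall-back shape. A hypothetical helper consolidating that idiom (the method is not in the source; Long.parseLong(null) throws, so missing and malformed values both fall back):

    // Hypothetical helper for the parse-with-default idiom used twice above.
    private static long getLongSetting(String param, long defaultValue) {
        try {
            return Long.parseLong(Router.getInstance().getConfigSetting(param));
        } catch (Throwable t) {
            return defaultValue;
        }
    }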
    public void shutdown() {
        if (_log.shouldLog(Log.INFO)) _log.info("Shutting down tunnel pool");
        _persistenceHelper.writePool(this);
        _isLive = false; // the subjobs [should] check getIsLive() on each run
        _outboundTunnels = null;
        _freeInboundTunnels = null;
        _clientPools = null;
        _participatingTunnels = null;
        _poolSettings = null;
        _persistenceHelper = null;
        _tunnelCreationTimeout = -1;
    }
    public boolean isLive() { return _isLive; }
    private ClientTunnelSettings createPoolSettings() {
        ClientTunnelSettings settings = new ClientTunnelSettings();
        settings.readFromProperties(Router.getInstance().getConfigMap());
        return settings;
    }
    public String renderStatusHTML() {
        if (!_isLive) return "";
        StringBuffer buf = new StringBuffer();
        buf.append("<h2>Tunnel Pool</h2>\n");
        renderTunnels(buf, "Free inbound tunnels", getFreeTunnels());
        renderTunnels(buf, "Outbound tunnels", getOutboundTunnels());
        renderTunnels(buf, "Participating tunnels", getParticipatingTunnels());
        for (Iterator iter = getClientPools().iterator(); iter.hasNext(); ) {
            Destination dest = (Destination)iter.next();
            ClientTunnelPool pool = getClientPool(dest);
            renderTunnels(buf, "Inbound tunnels for " + dest.calculateHash() + " - (still connected? " + (!pool.isStopped()) + ")", pool.getInboundTunnelIds());
        }
        return buf.toString();
    }
    private void renderTunnels(StringBuffer buf, String msg, Set tunnelIds) {
        buf.append("<b>").append(msg).append(":</b> <i>(").append(tunnelIds.size()).append(" tunnels)</i><ul>\n");
        for (Iterator iter = tunnelIds.iterator(); iter.hasNext(); ) {
            TunnelId id = (TunnelId)iter.next();
            TunnelInfo tunnel = getTunnelInfo(id);
            renderTunnel(buf, id, tunnel);
        }
        buf.append("</ul>\n");
    }
    private final static void renderTunnel(StringBuffer buf, TunnelId id, TunnelInfo tunnel) {
        if (tunnel == null) {
            buf.append("<li>Tunnel: ").append(id.getTunnelId()).append(" is not known</li>\n");
        } else {
            buf.append("<li>Tunnel: ").append(tunnel.getTunnelId()).append("</li><pre>");
            buf.append("\n\tStyle: ").append(getStyle(id));
            buf.append("\n\tReady? ").append(tunnel.getIsReady());
            buf.append("\n\tDest? ").append(getDestination(tunnel));
            if (tunnel.getSettings() != null)
                buf.append("\n\tExpiration: ").append(new Date(tunnel.getSettings().getExpiration()));
            else
                buf.append("\n\tExpiration: none");
            buf.append("\n\tStart router: ").append(tunnel.getThisHop().toBase64()).append("\n");
            TunnelInfo t = tunnel.getNextHopInfo();
            if (t != null) {
                int hop = 1;
                while (t != null) {
                    buf.append("\tHop ").append(hop).append(": ").append(t.getThisHop().toBase64()).append("\n");
                    t = t.getNextHopInfo();
                    hop++;
                }
            } else {
                if (tunnel.getNextHop() != null)
                    buf.append("\tNext: ").append(tunnel.getNextHop().toBase64()).append("\n");
            }
            buf.append("\n</pre>");
        }
    }
    private final static String getStyle(TunnelId id) {
        switch (id.getType()) {
            case TunnelId.TYPE_INBOUND:
                return "Inbound";
            case TunnelId.TYPE_OUTBOUND:
                return "Outbound";
            case TunnelId.TYPE_PARTICIPANT:
                return "Participant";
            case TunnelId.TYPE_UNSPECIFIED:
                return "Unspecified";
            default:
                return "Other! - " + id.getType();
        }
    }
    private final static String getDestination(TunnelInfo info) {
        while (info != null) {
            if (info.getDestination() != null)
                return info.getDestination().calculateHash().toString();
            else
                info = info.getNextHopInfo();
        }
        return "none";
    }
    /**
     * Periodically write the tunnel pool out to disk so it can be
     * reloaded after a restart.
     */
    private class WritePoolJob extends JobImpl {
        public WritePoolJob() {
            getTiming().setStartAfter(Clock.getInstance().now() + WRITE_POOL_DELAY);
        }
        public String getName() { return "Write Out Tunnel Pool"; }
        public void runJob() {
            if (!isLive())
                return;
            _persistenceHelper.writePool(TunnelPool.this);
            requeue(WRITE_POOL_DELAY);
        }
    }
}
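WritePoolJob shows the router's requeue idiom for periodic work: schedule the first run in the constructor, bail out once the pool is no longer live, do the work, and requeue with the same delay. A minimal skeleton of that idiom; the class and field names (PeriodicJob, _delay) are hypothetical, not from this commit:

    // Sketch only - the generic shape of a self-requeueing periodic job.
    private class PeriodicJob extends JobImpl {
        private final long _delay;
        public PeriodicJob(long delay) {
            _delay = delay;
            getTiming().setStartAfter(Clock.getInstance().now() + delay);
        }
        public String getName() { return "Periodic Example"; }
        public void runJob() {
            if (!isLive()) return;  // stop once the pool shuts down
            // ... do the periodic work here ...
            requeue(_delay);        // schedule the next pass
        }
    }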