* work around the disagreement between different versions of Sun's compiler and JVM:
Some of them think that it's ok for an inner class of a subclass to access protected data of the outer class's parent when the parent is in another package. Others do not. Kaffe doesn't care (but that's because Kaffe doesn't do much for verification ;) The JLS is apparently confusing, but it doesn't matter whether it's a bug in our code or in javac - we've got to change the code. The simplest change would be to just make JobImpl._context public, but I loathe public data, so we make it private and add an accessor (and change dozens of files). Whee.
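Read together, the JobImpl hunks and the call-site hunks below reduce to the pattern sketched here. This is a condensed reconstruction from this diff only (the constructor, Javadoc, and the unchanged parts of the class are omitted), not a complete copy of the file:

// After this commit, JobImpl keeps the context private and exposes it read-only:
public abstract class JobImpl implements Job {
    private RouterContext _context;   // was: protected RouterContext _context;
    private JobTiming _timing;
    private static int _idSrc = 0;
    private int _id;

    public int getJobId() { return _id; }
    public JobTiming getTiming() { return _timing; }
    public final RouterContext getContext() { return _context; }
    // ... rest of the class unchanged
}

// Call sites in subclasses change along these lines (all taken from the hunks below):
//   _context.clock().now()                        ->  getContext().clock().now()
//   super(RequestLeaseSetJob.this._context)       ->  super(RequestLeaseSetJob.this.getContext())
//   new SendMessageDirectJob(_context, msg, ...)  ->  new SendMessageDirectJob(getContext(), msg, ...)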
@@ -13,7 +13,7 @@ import net.i2p.util.Log;
 * Base implementation of a Job
 */
 public abstract class JobImpl implements Job {
-protected RouterContext _context;
+private RouterContext _context;
 private JobTiming _timing;
 private static int _idSrc = 0;
 private int _id;
@@ -31,6 +31,8 @@ public abstract class JobImpl implements Job {
 public int getJobId() { return _id; }
 public JobTiming getTiming() { return _timing; }

+public final RouterContext getContext() { return _context; }
+
 public String toString() {
 StringBuffer buf = new StringBuffer(128);
 buf.append(super.toString());
@@ -495,16 +495,16 @@ public class ClientConnectionRunner {
 + MessageStatusMessage.getStatusString(msg.getStatus())
 + " for session [" + _sessionId.getSessionId()
 + "] before they knew the messageId! delaying .5s");
-_lastTried = ClientConnectionRunner.this._context.clock().now();
+_lastTried = _context.clock().now();
 requeue(REQUEUE_DELAY);
 return;
 }

 boolean alreadyProcessed = false;
-long beforeLock = MessageDeliveryStatusUpdate.this._context.clock().now();
+long beforeLock = _context.clock().now();
 long inLock = 0;
 synchronized (_alreadyProcessed) {
-inLock = MessageDeliveryStatusUpdate.this._context.clock().now();
+inLock = _context.clock().now();
 if (_alreadyProcessed.contains(_messageId)) {
 _log.warn("Status already updated");
 alreadyProcessed = true;
@@ -514,7 +514,7 @@ public class ClientConnectionRunner {
 _alreadyProcessed.remove(0);
 }
 }
-long afterLock = MessageDeliveryStatusUpdate.this._context.clock().now();
+long afterLock = _context.clock().now();

 if (afterLock - beforeLock > 50) {
 _log.warn("MessageDeliveryStatusUpdate.locking took too long: " + (afterLock-beforeLock)
@@ -529,7 +529,7 @@ public class ClientConnectionRunner {
 + MessageStatusMessage.getStatusString(msg.getStatus())
 + " for session [" + _sessionId.getSessionId()
 + "] (with nonce=2), retrying after ["
-+ (ClientConnectionRunner.this._context.clock().now() - _lastTried)
++ (_context.clock().now() - _lastTried)
 + "]", getAddedBy());
 } else {
 if (_log.shouldLog(Log.DEBUG))
@@ -338,7 +338,7 @@ public class ClientManager {
 private class HandleJob extends JobImpl {
 private ClientMessage _msg;
 public HandleJob(ClientMessage msg) {
-super(ClientManager.this._context);
+super(_context);
 _msg = msg;
 }
 public String getName() { return "Handle Inbound Client Messages"; }
@@ -350,7 +350,7 @@ public class ClientManager {
 runner = getRunner(_msg.getDestinationHash());

 if (runner != null) {
-HandleJob.this._context.statManager().addRateData("client.receiveMessageSize",
+_context.statManager().addRateData("client.receiveMessageSize",
 _msg.getPayload().getSize(), 0);
 runner.receiveMessage(_msg.getDestination(), null, _msg.getPayload());
 } else {
@@ -65,6 +65,6 @@ class CreateSessionJob extends JobImpl {

 // and load 'em up (using anything not yet set as the software defaults)
 settings.readFromProperties(props);
-_context.tunnelManager().createTunnels(_runner.getConfig().getDestination(), settings, LEASE_CREATION_TIMEOUT);
+getContext().tunnelManager().createTunnels(_runner.getConfig().getDestination(), settings, LEASE_CREATION_TIMEOUT);
 }
 }
@@ -48,7 +48,7 @@ class RequestLeaseSetJob extends JobImpl {
 if (_runner.isDead()) return;
 LeaseRequestState oldReq = _runner.getLeaseRequest();
 if (oldReq != null) {
-if (oldReq.getExpiration() > _context.clock().now()) {
+if (oldReq.getExpiration() > getContext().clock().now()) {
 _log.error("Old *current* leaseRequest already exists! Why are we trying to request too quickly?", getAddedBy());
 return;
 } else {
@@ -76,7 +76,7 @@ class RequestLeaseSetJob extends JobImpl {
 try {
 _runner.setLeaseRequest(state);
 _runner.doSend(msg);
-_context.jobQueue().addJob(new CheckLeaseRequestStatus(state));
+getContext().jobQueue().addJob(new CheckLeaseRequestStatus(state));
 return;
 } catch (I2CPMessageException ime) {
 _log.error("Error sending I2CP message requesting the lease set", ime);
@@ -97,7 +97,7 @@ class RequestLeaseSetJob extends JobImpl {
 private LeaseRequestState _req;

 public CheckLeaseRequestStatus(LeaseRequestState state) {
-super(RequestLeaseSetJob.this._context);
+super(RequestLeaseSetJob.this.getContext());
 _req = state;
 getTiming().setStartAfter(state.getExpiration());
 }
@@ -111,7 +111,7 @@ class RequestLeaseSetJob extends JobImpl {
 _log.error("Failed to receive a leaseSet in the time allotted (" + new Date(_req.getExpiration()) + ")");
 _runner.disconnectClient("Took too long to request leaseSet");
 if (_req.getOnFailed() != null)
-RequestLeaseSetJob.this._context.jobQueue().addJob(_req.getOnFailed());
+RequestLeaseSetJob.this.getContext().jobQueue().addJob(_req.getOnFailed());

 // only zero out the request if its the one we know about
 if (_req == _runner.getLeaseRequest())
@@ -75,18 +75,18 @@ public class BuildTestMessageJob extends JobImpl {
 _log.debug("Building garlic message to test " + _target.getIdentity().getHash().toBase64());
 GarlicConfig config = buildGarlicCloveConfig();
 // TODO: make the last params on this specify the correct sessionKey and tags used
-ReplyJob replyJob = new JobReplyJob(_context, _onSend, config.getRecipient().getIdentity().getPublicKey(), config.getId(), null, new HashSet());
+ReplyJob replyJob = new JobReplyJob(getContext(), _onSend, config.getRecipient().getIdentity().getPublicKey(), config.getId(), null, new HashSet());
 MessageSelector sel = buildMessageSelector();
-SendGarlicJob job = new SendGarlicJob(_context, config, null, _onSendFailed, replyJob, _onSendFailed, _timeoutMs, _priority, sel);
-_context.jobQueue().addJob(job);
+SendGarlicJob job = new SendGarlicJob(getContext(), config, null, _onSendFailed, replyJob, _onSendFailed, _timeoutMs, _priority, sel);
+getContext().jobQueue().addJob(job);
 }

 private MessageSelector buildMessageSelector() {
-return new TestMessageSelector(_testMessageKey, _timeoutMs + _context.clock().now());
+return new TestMessageSelector(_testMessageKey, _timeoutMs + getContext().clock().now());
 }

 private GarlicConfig buildGarlicCloveConfig() {
-_testMessageKey = _context.random().nextLong(I2NPMessage.MAX_ID_VALUE);
+_testMessageKey = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
 if (_log.shouldLog(Log.INFO))
 _log.info("Test message key: " + _testMessageKey);
 GarlicConfig config = new GarlicConfig();
@@ -105,8 +105,8 @@ public class BuildTestMessageJob extends JobImpl {

 config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
 config.setDeliveryInstructions(instructions);
-config.setId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
-config.setExpiration(_timeoutMs+_context.clock().now()+2*Router.CLOCK_FUDGE_FACTOR);
+config.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
+config.setExpiration(_timeoutMs+getContext().clock().now()+2*Router.CLOCK_FUDGE_FACTOR);
 config.setRecipient(_target);
 config.setRequestAck(false);

@@ -126,16 +126,16 @@ public class BuildTestMessageJob extends JobImpl {
 ackInstructions.setDelaySeconds(0);
 ackInstructions.setEncrypted(false);

-DeliveryStatusMessage msg = new DeliveryStatusMessage(_context);
-msg.setArrival(new Date(_context.clock().now()));
+DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
+msg.setArrival(new Date(getContext().clock().now()));
 msg.setMessageId(_testMessageKey);
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("Delivery status message key: " + _testMessageKey + " arrival: " + msg.getArrival());

 ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
 ackClove.setDeliveryInstructions(ackInstructions);
-ackClove.setExpiration(_timeoutMs+_context.clock().now());
-ackClove.setId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
+ackClove.setExpiration(_timeoutMs+getContext().clock().now());
+ackClove.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
 ackClove.setPayload(msg);
 ackClove.setRecipient(_target);
 ackClove.setRequestAck(false);
@@ -187,9 +187,9 @@ public class BuildTestMessageJob extends JobImpl {
 if ( (_keyDelivered != null) &&
 (_sessionTagsDelivered != null) &&
 (_sessionTagsDelivered.size() > 0) )
-_context.sessionKeyManager().tagsDelivered(_target, _keyDelivered, _sessionTagsDelivered);
+getContext().sessionKeyManager().tagsDelivered(_target, _keyDelivered, _sessionTagsDelivered);

-_context.jobQueue().addJob(_job);
+getContext().jobQueue().addJob(_job);
 }

 public void setMessage(I2NPMessage message) {
@@ -47,7 +47,7 @@ public class HandleGarlicMessageJob extends JobImpl {
 public HandleGarlicMessageJob(RouterContext context, GarlicMessage msg, RouterIdentity from, Hash fromHash) {
 super(context);
 _log = context.logManager().getLog(HandleGarlicMessageJob.class);
-_context.statManager().createRateStat("crypto.garlic.decryptFail", "How often garlic messages are undecryptable", "Encryption", new long[] { 5*60*1000, 60*60*1000, 24*60*60*1000 });
+getContext().statManager().createRateStat("crypto.garlic.decryptFail", "How often garlic messages are undecryptable", "Encryption", new long[] { 5*60*1000, 60*60*1000, 24*60*60*1000 });
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("New handle garlicMessageJob called w/ message from [" + from + "]", new Exception("Debug"));
 _message = msg;
@@ -60,9 +60,9 @@ public class HandleGarlicMessageJob extends JobImpl {

 public String getName() { return "Handle Inbound Garlic Message"; }
 public void runJob() {
-CloveSet set = _parser.getGarlicCloves(_message, _context.keyManager().getPrivateKey());
+CloveSet set = _parser.getGarlicCloves(_message, getContext().keyManager().getPrivateKey());
 if (set == null) {
-Set keys = _context.keyManager().getAllKeys();
+Set keys = getContext().keyManager().getAllKeys();
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("Decryption with the router's key failed, now try with the " + keys.size() + " leaseSet keys");
 // our router key failed, which means that it was either encrypted wrong
@@ -95,8 +95,8 @@ public class HandleGarlicMessageJob extends JobImpl {
 _log.error("CloveMessageParser failed to decrypt the message [" + _message.getUniqueId()
 + "] to us when received from [" + _fromHash + "] / [" + _from + "]",
 new Exception("Decrypt garlic failed"));
-_context.statManager().addRateData("crypto.garlic.decryptFail", 1, 0);
-_context.messageHistory().messageProcessingError(_message.getUniqueId(),
+getContext().statManager().addRateData("crypto.garlic.decryptFail", 1, 0);
+getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
 _message.getClass().getName(),
 "Garlic could not be decrypted");
 }
@@ -116,7 +116,7 @@ public class HandleGarlicMessageJob extends JobImpl {
 // this should be in its own thread perhaps? and maybe _cloves should be
 // synced to disk?
 List toRemove = new ArrayList(32);
-long now = _context.clock().now();
+long now = getContext().clock().now();
 synchronized (_cloves) {
 for (Iterator iter = _cloves.keySet().iterator(); iter.hasNext();) {
 Long id = (Long)iter.next();
@@ -139,7 +139,7 @@ public class HandleGarlicMessageJob extends JobImpl {
 _log.debug("Clove " + clove.getCloveId() + " expiring on " + clove.getExpiration()
 + " is not known");
 }
-long now = _context.clock().now();
+long now = getContext().clock().now();
 if (clove.getExpiration().getTime() < now) {
 if (clove.getExpiration().getTime() < now + Router.CLOCK_FUDGE_FACTOR) {
 _log.warn("Expired garlic received, but within our fudge factor ["
@@ -148,7 +148,7 @@ public class HandleGarlicMessageJob extends JobImpl {
 if (_log.shouldLog(Log.DEBUG))
 _log.error("Expired garlic clove received - replay attack in progress? [cloveId = "
 + clove.getCloveId() + " expiration = " + clove.getExpiration()
-+ " now = " + (new Date(_context.clock().now())));
++ " now = " + (new Date(getContext().clock().now())));
 return false;
 }
 }
@@ -174,7 +174,7 @@ public class HandleGarlicMessageJob extends JobImpl {
 }

 public void dropped() {
-_context.messageHistory().messageProcessingError(_message.getUniqueId(),
+getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
 _message.getClass().getName(),
 "Dropped due to overload");
 }
@@ -42,7 +42,7 @@ public class HandleSourceRouteReplyMessageJob extends JobImpl {

 public HandleSourceRouteReplyMessageJob(RouterContext context, SourceRouteReplyMessage msg, RouterIdentity from, Hash fromHash) {
 super(context);
-_log = _context.logManager().getLog(HandleSourceRouteReplyMessageJob.class);
+_log = getContext().logManager().getLog(HandleSourceRouteReplyMessageJob.class);
 _message = msg;
 _from = from;
 _fromHash = fromHash;
@@ -53,9 +53,9 @@ public class HandleSourceRouteReplyMessageJob extends JobImpl {
 public String getName() { return "Handle Source Route Reply Message"; }
 public void runJob() {
 try {
-long before = _context.clock().now();
-_message.decryptHeader(_context.keyManager().getPrivateKey());
-long after = _context.clock().now();
+long before = getContext().clock().now();
+_message.decryptHeader(getContext().keyManager().getPrivateKey());
+long after = getContext().clock().now();
 if ( (after-before) > 1000) {
 if (_log.shouldLog(Log.WARN))
 _log.warn("Took more than a second (" + (after-before)
@@ -71,7 +71,7 @@ public class HandleSourceRouteReplyMessageJob extends JobImpl {
 + _message.getUniqueId() + ")", dfe);
 if (_log.shouldLog(Log.WARN))
 _log.warn("Message header could not be decrypted: " + _message, getAddedBy());
-_context.messageHistory().messageProcessingError(_message.getUniqueId(),
+getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
 _message.getClass().getName(),
 "Source route message header could not be decrypted");
 return;
@@ -85,7 +85,7 @@ public class HandleSourceRouteReplyMessageJob extends JobImpl {

 DeliveryInstructions instructions = _message.getDecryptedInstructions();

-long now = _context.clock().now();
+long now = getContext().clock().now();
 long expiration = _message.getDecryptedExpiration();
 // if its expiring really soon, jack the expiration 30 seconds
 if (expiration < now+10*1000)
@@ -97,7 +97,7 @@ public class HandleSourceRouteReplyMessageJob extends JobImpl {
 }

 private boolean isValid() {
-long now = _context.clock().now();
+long now = getContext().clock().now();
 if (_message.getDecryptedExpiration() < now) {
 if (_message.getDecryptedExpiration() < now + Router.CLOCK_FUDGE_FACTOR) {
 _log.info("Expired message received, but within our fudge factor");
@@ -135,7 +135,7 @@ public class HandleSourceRouteReplyMessageJob extends JobImpl {
 // this should be in its own thread perhaps, or job? and maybe _seenMessages should be
 // synced to disk?
 List toRemove = new ArrayList(32);
-long now = _context.clock().now()-Router.CLOCK_FUDGE_FACTOR;
+long now = getContext().clock().now()-Router.CLOCK_FUDGE_FACTOR;
 synchronized (_seenMessages) {
 for (Iterator iter = _seenMessages.keySet().iterator(); iter.hasNext();) {
 Long id = (Long)iter.next();
@@ -149,7 +149,7 @@ public class HandleSourceRouteReplyMessageJob extends JobImpl {
 }

 public void dropped() {
-_context.messageHistory().messageProcessingError(_message.getUniqueId(),
+getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
 _message.getClass().getName(),
 "Dropped due to overload");
 }
@@ -64,7 +64,7 @@ public class HandleTunnelMessageJob extends JobImpl {
 public void runJob() {
 TunnelId id = _message.getTunnelId();

-long excessLag = _context.clock().now() - _message.getMessageExpiration().getTime();
+long excessLag = getContext().clock().now() - _message.getMessageExpiration().getTime();
 if (excessLag > Router.CLOCK_FUDGE_FACTOR) {
 // expired while on the queue
 if (_log.shouldLog(Log.WARN))
@@ -72,8 +72,8 @@ public class HandleTunnelMessageJob extends JobImpl {
 + id.getTunnelId() + " expiring "
 + excessLag
 + "ms ago");
-_context.statManager().addRateData("tunnel.expiredAfterAcceptTime", excessLag, excessLag);
-_context.messageHistory().messageProcessingError(_message.getUniqueId(),
+getContext().statManager().addRateData("tunnel.expiredAfterAcceptTime", excessLag, excessLag);
+getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
 TunnelMessage.class.getName(),
 "tunnel message expired on the queue");
 return;
@@ -86,18 +86,18 @@ public class HandleTunnelMessageJob extends JobImpl {
 + "ms ago");
 }

-TunnelInfo info = _context.tunnelManager().getTunnelInfo(id);
+TunnelInfo info = getContext().tunnelManager().getTunnelInfo(id);

 if (info == null) {
 Hash from = _fromHash;
 if (_from != null)
 from = _from.getHash();
-_context.messageHistory().droppedTunnelMessage(id, from);
+getContext().messageHistory().droppedTunnelMessage(id, from);
 if (_log.shouldLog(Log.ERROR))
 _log.error("Received a message for an unknown tunnel [" + id.getTunnelId()
 + "], dropping it: " + _message, getAddedBy());
-long timeRemaining = _message.getMessageExpiration().getTime() - _context.clock().now();
-_context.statManager().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0);
+long timeRemaining = _message.getMessageExpiration().getTime() - getContext().clock().now();
+getContext().statManager().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0);
 return;
 }

@@ -107,8 +107,8 @@ public class HandleTunnelMessageJob extends JobImpl {
 if (info == null) {
 if (_log.shouldLog(Log.ERROR))
 _log.error("We are not part of a known tunnel?? wtf! drop.", getAddedBy());
-long timeRemaining = _message.getMessageExpiration().getTime() - _context.clock().now();
-_context.statManager().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0);
+long timeRemaining = _message.getMessageExpiration().getTime() - getContext().clock().now();
+getContext().statManager().addRateData("tunnel.unknownTunnelTimeLeft", timeRemaining, 0);
 return;
 } else {
 if (_log.shouldLog(Log.DEBUG))
@@ -123,7 +123,7 @@ public class HandleTunnelMessageJob extends JobImpl {
 _log.debug("We are the gateway to tunnel " + id.getTunnelId());
 byte data[] = _message.getData();
 I2NPMessage msg = getBody(data);
-_context.jobQueue().addJob(new HandleGatewayMessageJob(msg, info, data.length));
+getContext().jobQueue().addJob(new HandleGatewayMessageJob(msg, info, data.length));
 return;
 } else {
 if (_log.shouldLog(Log.DEBUG))
@@ -131,14 +131,14 @@ public class HandleTunnelMessageJob extends JobImpl {
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("Process locally");
 if (info.getDestination() != null) {
-if (!_context.clientManager().isLocal(info.getDestination())) {
+if (!getContext().clientManager().isLocal(info.getDestination())) {
 if (_log.shouldLog(Log.WARN))
 _log.warn("Received a message on a tunnel allocated to a client that has disconnected - dropping it!");
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("Dropping message for disconnected client: " + _message);

-_context.messageHistory().droppedOtherMessage(_message);
-_context.messageHistory().messageProcessingError(_message.getUniqueId(),
+getContext().messageHistory().droppedOtherMessage(_message);
+getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
 _message.getClass().getName(),
 "Disconnected client");
 return;
@@ -147,7 +147,7 @@ public class HandleTunnelMessageJob extends JobImpl {

 I2NPMessage body = getBody(_message.getData());
 if (body != null) {
-_context.jobQueue().addJob(new HandleLocallyJob(body, info));
+getContext().jobQueue().addJob(new HandleLocallyJob(body, info));
 return;
 } else {
 if (_log.shouldLog(Log.ERROR))
@@ -167,7 +167,7 @@ public class HandleTunnelMessageJob extends JobImpl {
 } else {
 // participant
 TunnelVerificationStructure struct = _message.getVerificationStructure();
-boolean ok = struct.verifySignature(_context, info.getVerificationKey().getKey());
+boolean ok = struct.verifySignature(getContext(), info.getVerificationKey().getKey());
 if (!ok) {
 if (_log.shouldLog(Log.WARN))
 _log.warn("Failed tunnel verification! Spoofing / tagging attack? " + _message, getAddedBy());
@@ -179,18 +179,18 @@ public class HandleTunnelMessageJob extends JobImpl {
 + " received where we're not the gateway and there are remaining hops, so forward it on to "
 + info.getNextHop().toBase64() + " via SendTunnelMessageJob");

-_context.statManager().addRateData("tunnel.relayMessageSize",
+getContext().statManager().addRateData("tunnel.relayMessageSize",
 _message.getData().length, 0);

-_context.jobQueue().addJob(new SendMessageDirectJob(_context, _message,
+getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), _message,
 info.getNextHop(),
-_context.clock().now() + FORWARD_TIMEOUT,
+getContext().clock().now() + FORWARD_TIMEOUT,
 FORWARD_PRIORITY));
 return;
 } else {
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("No more hops, unwrap and follow the instructions");
-_context.jobQueue().addJob(new HandleEndpointJob(info));
+getContext().jobQueue().addJob(new HandleEndpointJob(info));
 return;
 }
 }
@@ -227,20 +227,20 @@ public class HandleTunnelMessageJob extends JobImpl {
 _log.error("Unable to recover the body from the tunnel", getAddedBy());
 return;
 } else {
-_context.jobQueue().addJob(new ProcessBodyLocallyJob(body, instructions, ourPlace));
+getContext().jobQueue().addJob(new ProcessBodyLocallyJob(body, instructions, ourPlace));
 }
 }
 }

 private void honorInstructions(DeliveryInstructions instructions, I2NPMessage body) {
-_context.statManager().addRateData("tunnel.endpointMessageSize", _message.getData().length, 0);
+getContext().statManager().addRateData("tunnel.endpointMessageSize", _message.getData().length, 0);

 switch (instructions.getDeliveryMode()) {
 case DeliveryInstructions.DELIVERY_MODE_LOCAL:
 sendToLocal(body);
 break;
 case DeliveryInstructions.DELIVERY_MODE_ROUTER:
-if (_context.routerHash().equals(instructions.getRouter())) {
+if (getContext().routerHash().equals(instructions.getRouter())) {
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("Delivery instructions point at a router, but we're that router, so send to local");
 sendToLocal(body);
@@ -261,7 +261,7 @@ public class HandleTunnelMessageJob extends JobImpl {

 private void sendToDest(Hash dest, I2NPMessage body) {
 if (body instanceof DataMessage) {
-boolean isLocal = _context.clientManager().isLocal(dest);
+boolean isLocal = getContext().clientManager().isLocal(dest);
 if (isLocal) {
 deliverMessage(null, dest, (DataMessage)body);
 return;
@@ -282,17 +282,17 @@ public class HandleTunnelMessageJob extends JobImpl {
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("Sending on to requested tunnel " + id.getTunnelId() + " on router "
 + router.toBase64());
-TunnelMessage msg = new TunnelMessage(_context);
+TunnelMessage msg = new TunnelMessage(getContext());
 msg.setTunnelId(id);
 try {
 ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
 body.writeBytes(baos);
 msg.setData(baos.toByteArray());
-long exp = _context.clock().now() + FORWARD_TIMEOUT;
-_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, router, exp, FORWARD_PRIORITY));
+long exp = getContext().clock().now() + FORWARD_TIMEOUT;
+getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, router, exp, FORWARD_PRIORITY));

 String bodyType = body.getClass().getName();
-_context.messageHistory().wrap(bodyType, body.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
+getContext().messageHistory().wrap(bodyType, body.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
 } catch (DataFormatException dfe) {
 if (_log.shouldLog(Log.ERROR))
 _log.error("Error writing out the message to forward to the tunnel", dfe);
@@ -306,26 +306,26 @@ public class HandleTunnelMessageJob extends JobImpl {
 // TODO: we may want to send it via a tunnel later on, but for now, direct will do.
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("Sending on to requested router " + router.toBase64());
-long exp = _context.clock().now() + FORWARD_TIMEOUT;
-_context.jobQueue().addJob(new SendMessageDirectJob(_context, body, router, exp, FORWARD_PRIORITY));
+long exp = getContext().clock().now() + FORWARD_TIMEOUT;
+getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), body, router, exp, FORWARD_PRIORITY));
 }

 private void sendToLocal(I2NPMessage body) {
-InNetMessage msg = new InNetMessage(_context);
+InNetMessage msg = new InNetMessage(getContext());
 msg.setMessage(body);
 msg.setFromRouter(_from);
 msg.setFromRouterHash(_fromHash);
-_context.inNetMessagePool().add(msg);
+getContext().inNetMessagePool().add(msg);
 }

 private void deliverMessage(Destination dest, Hash destHash, DataMessage msg) {
-boolean valid = _context.messageValidator().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime());
+boolean valid = getContext().messageValidator().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime());
 if (!valid) {
 if (_log.shouldLog(Log.WARN))
 _log.warn("Duplicate data message received [" + msg.getUniqueId()
 + " expiring on " + msg.getMessageExpiration() + "]");
-_context.messageHistory().droppedOtherMessage(msg);
-_context.messageHistory().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(),
+getContext().messageHistory().droppedOtherMessage(msg);
+getContext().messageHistory().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(),
 "Duplicate payload");
 return;
 }
@@ -344,9 +344,9 @@ public class HandleTunnelMessageJob extends JobImpl {
 cmsg.setPayload(payload);
 cmsg.setReceptionInfo(info);

-_context.messageHistory().receivePayloadMessage(msg.getUniqueId());
+getContext().messageHistory().receivePayloadMessage(msg.getUniqueId());
 // if the destination isn't local, the ClientMessagePool forwards it off as an OutboundClientMessageJob
-_context.clientMessagePool().add(cmsg);
+getContext().clientMessagePool().add(cmsg);
 }

 private I2NPMessage getBody(byte body[]) {
@@ -364,9 +364,9 @@ public class HandleTunnelMessageJob extends JobImpl {

 private I2NPMessage decryptBody(byte encryptedMessage[], SessionKey key) {
 byte iv[] = new byte[16];
-Hash h = _context.sha().calculateHash(key.getData());
+Hash h = getContext().sha().calculateHash(key.getData());
 System.arraycopy(h.getData(), 0, iv, 0, iv.length);
-byte decrypted[] = _context.AESEngine().safeDecrypt(encryptedMessage, key, iv);
+byte decrypted[] = getContext().AESEngine().safeDecrypt(encryptedMessage, key, iv);
 if (decrypted == null) {
 if (_log.shouldLog(Log.ERROR))
 _log.error("Error decrypting the message", getAddedBy());
@@ -378,9 +378,9 @@ public class HandleTunnelMessageJob extends JobImpl {
 private DeliveryInstructions getInstructions(byte encryptedInstructions[], SessionKey key) {
 try {
 byte iv[] = new byte[16];
-Hash h = _context.sha().calculateHash(key.getData());
+Hash h = getContext().sha().calculateHash(key.getData());
 System.arraycopy(h.getData(), 0, iv, 0, iv.length);
-byte decrypted[] = _context.AESEngine().safeDecrypt(encryptedInstructions, key, iv);
+byte decrypted[] = getContext().AESEngine().safeDecrypt(encryptedInstructions, key, iv);
 if (decrypted == null) {
 if (_log.shouldLog(Log.ERROR))
 _log.error("Error decrypting the instructions", getAddedBy());
@@ -400,7 +400,7 @@ public class HandleTunnelMessageJob extends JobImpl {
 }

 private TunnelInfo getUs(TunnelInfo info) {
-Hash us = _context.routerHash();
+Hash us = getContext().routerHash();
 while (info != null) {
 if (us.equals(info.getThisHop()))
 return info;
@@ -423,7 +423,7 @@ public class HandleTunnelMessageJob extends JobImpl {
 return false;
 }

-if (!vstruct.verifySignature(_context, info.getVerificationKey().getKey())) {
+if (!vstruct.verifySignature(getContext(), info.getVerificationKey().getKey())) {
 if (_log.shouldLog(Log.ERROR))
 _log.error("Received a tunnel message with an invalid signature!");
 // shitlist the sender?
@@ -431,7 +431,7 @@ public class HandleTunnelMessageJob extends JobImpl {
 }

 // now validate the message
-Hash msgHash = _context.sha().calculateHash(_message.getData());
+Hash msgHash = getContext().sha().calculateHash(_message.getData());
 if (msgHash.equals(vstruct.getMessageHash())) {
 // hash matches. good.
 return true;
@@ -444,7 +444,7 @@ public class HandleTunnelMessageJob extends JobImpl {
 }

 public void dropped() {
-_context.messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(),
+getContext().messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(),
 "Dropped due to overload");
 }

@@ -459,13 +459,13 @@ public class HandleTunnelMessageJob extends JobImpl {
 private TunnelInfo _info;

 public HandleGatewayMessageJob(I2NPMessage body, TunnelInfo tunnel, int length) {
-super(HandleTunnelMessageJob.this._context);
+super(HandleTunnelMessageJob.this.getContext());
 _body = body;
 _length = length;
 _info = tunnel;
 }
 public void runJob() {
-RouterContext ctx = HandleTunnelMessageJob.this._context;
+RouterContext ctx = HandleTunnelMessageJob.this.getContext();
 if (_body != null) {
 ctx.statManager().addRateData("tunnel.gatewayMessageSize", _length, 0);
 if (_log.shouldLog(Log.INFO))
@@ -488,7 +488,7 @@ public class HandleTunnelMessageJob extends JobImpl {
 private TunnelInfo _info;

 public HandleLocallyJob(I2NPMessage body, TunnelInfo tunnel) {
-super(HandleTunnelMessageJob.this._context);
+super(HandleTunnelMessageJob.this.getContext());
 _body = body;
 _info = tunnel;
 }
@@ -507,11 +507,11 @@ public class HandleTunnelMessageJob extends JobImpl {
 _log.info("Message for tunnel " + _info.getTunnelId() +
 " received at the gateway (us), but its a 0 length tunnel though it is a "
 + _body.getClass().getName() + ", so process it locally");
-InNetMessage msg = new InNetMessage(HandleLocallyJob.this._context);
+InNetMessage msg = new InNetMessage(HandleLocallyJob.this.getContext());
 msg.setFromRouter(_from);
 msg.setFromRouterHash(_fromHash);
 msg.setMessage(_body);
-HandleLocallyJob.this._context.inNetMessagePool().add(msg);
+HandleLocallyJob.this.getContext().inNetMessagePool().add(msg);
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("Message added to Inbound network pool for local processing: " + _message);
 }
@@ -523,7 +523,7 @@ public class HandleTunnelMessageJob extends JobImpl {
 private class HandleEndpointJob extends JobImpl {
 private TunnelInfo _info;
 public HandleEndpointJob(TunnelInfo info) {
-super(HandleTunnelMessageJob.this._context);
+super(HandleTunnelMessageJob.this.getContext());
 _info = info;
 }
 public void runJob() {
@@ -538,7 +538,7 @@ public class HandleTunnelMessageJob extends JobImpl {
 private TunnelInfo _ourPlace;
 private DeliveryInstructions _instructions;
 public ProcessBodyLocallyJob(I2NPMessage body, DeliveryInstructions instructions, TunnelInfo ourPlace) {
-super(HandleTunnelMessageJob.this._context);
+super(HandleTunnelMessageJob.this.getContext());
 _body = body;
 _instructions = instructions;
 _ourPlace = ourPlace;
@@ -121,7 +121,7 @@ public class OutboundClientMessageJob extends JobImpl {
 }
 }

-_overallExpiration = timeoutMs + _context.clock().now();
+_overallExpiration = timeoutMs + getContext().clock().now();
 _status = new OutboundClientMessageStatus(msg);
 _nextStep = new NextStepJob();
 _lookupLeaseSetFailed = new LookupLeaseSetFailedJob();
@@ -137,11 +137,11 @@ public class OutboundClientMessageJob extends JobImpl {
 if (_log.shouldLog(Log.DEBUG))
 _log.debug(getJobId() + ": Clove built");
 Hash to = _status.getTo().calculateHash();
-long timeoutMs = _overallExpiration - _context.clock().now();
+long timeoutMs = _overallExpiration - getContext().clock().now();
 if (_log.shouldLog(Log.DEBUG))
 _log.debug(getJobId() + ": Send outbound client message - sending off leaseSet lookup job");
 _status.incrementLookups();
-_context.netDb().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, timeoutMs);
+getContext().netDb().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, timeoutMs);
 }

 /**
@@ -163,7 +163,7 @@ public class OutboundClientMessageJob extends JobImpl {
 return;
 }

-long now = _context.clock().now();
+long now = getContext().clock().now();
 if (now >= _overallExpiration) {
 if (_log.shouldLog(Log.WARN))
 _log.warn(getJobId() + ": sendNext() - Expired");
@@ -183,13 +183,13 @@ public class OutboundClientMessageJob extends JobImpl {
 _log.warn(getJobId() + ": No more leases, and we still haven't heard back from the peer"
 + ", refetching the leaseSet to try again");
 _status.setLeaseSet(null);
-long remainingMs = _overallExpiration - _context.clock().now();
+long remainingMs = _overallExpiration - getContext().clock().now();
 if (_status.getNumLookups() < MAX_LEASE_LOOKUPS) {
 _status.incrementLookups();
 Hash to = _status.getMessage().getDestination().calculateHash();
 _status.clearAlreadySent(); // so we can send down old tunnels again
-_context.netDb().fail(to); // so we don't just fetch what we have
-_context.netDb().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, remainingMs);
+getContext().netDb().fail(to); // so we don't just fetch what we have
+getContext().netDb().lookupLeaseSet(to, _nextStep, _lookupLeaseSetFailed, remainingMs);
 return;
 } else {
 if (_log.shouldLog(Log.WARN))
@@ -200,7 +200,7 @@ public class OutboundClientMessageJob extends JobImpl {
 }
 }

-_context.jobQueue().addJob(new SendJob(nextLease));
+getContext().jobQueue().addJob(new SendJob(nextLease));
 }

 /**
@@ -214,7 +214,7 @@ public class OutboundClientMessageJob extends JobImpl {
 private Lease getNextLease() {
 LeaseSet ls = _status.getLeaseSet();
 if (ls == null) {
-ls = _context.netDb().lookupLeaseSetLocally(_status.getTo().calculateHash());
+ls = getContext().netDb().lookupLeaseSetLocally(_status.getTo().calculateHash());
 if (ls == null) {
 if (_log.shouldLog(Log.INFO))
 _log.info(getJobId() + ": Lookup locally didn't find the leaseSet");
@@ -225,7 +225,7 @@ public class OutboundClientMessageJob extends JobImpl {
 }
 _status.setLeaseSet(ls);
 }
-long now = _context.clock().now();
+long now = getContext().clock().now();

 // get the possible leases
 List leases = new ArrayList(4);
@@ -285,7 +285,7 @@ public class OutboundClientMessageJob extends JobImpl {
 _log.warn(getJobId() + ": Bundle leaseSet probability overridden incorrectly ["
 + str + "]", nfe);
 }
-if (probability >= _context.random().nextInt(100))
+if (probability >= getContext().random().nextInt(100))
 return true;
 else
 return false;
@@ -303,16 +303,16 @@ public class OutboundClientMessageJob extends JobImpl {
 *
 */
 private void send(Lease lease) {
-long token = _context.random().nextLong(I2NPMessage.MAX_ID_VALUE);
+long token = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
 PublicKey key = _status.getLeaseSet().getEncryptionKey();
 SessionKey sessKey = new SessionKey();
 Set tags = new HashSet();
 LeaseSet replyLeaseSet = null;
 if (_shouldBundle) {
-replyLeaseSet = _context.netDb().lookupLeaseSetLocally(_status.getFrom().calculateHash());
+replyLeaseSet = getContext().netDb().lookupLeaseSetLocally(_status.getFrom().calculateHash());
 }

-GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(_context, token,
+GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(getContext(), token,
 _overallExpiration, key,
 _status.getClove(),
 _status.getTo(), sessKey,
@@ -338,12 +338,12 @@ public class OutboundClientMessageJob extends JobImpl {
 _log.debug(getJobId() + ": Sending tunnel message out " + outTunnelId + " to "
 + lease.getTunnelId() + " on "
 + lease.getRouterIdentity().getHash().toBase64());
-SendTunnelMessageJob j = new SendTunnelMessageJob(_context, msg, outTunnelId,
+SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId,
 lease.getRouterIdentity().getHash(),
 lease.getTunnelId(), null, onReply,
 onFail, selector, SEND_TIMEOUT_MS,
 SEND_PRIORITY);
-_context.jobQueue().addJob(j);
+getContext().jobQueue().addJob(j);
 } else {
 if (_log.shouldLog(Log.ERROR))
 _log.error(getJobId() + ": Could not find any outbound tunnels to send the payload through... wtf?");
@@ -360,7 +360,7 @@ public class OutboundClientMessageJob extends JobImpl {
 TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
 crit.setMaximumTunnelsRequired(1);
 crit.setMinimumTunnelsRequired(1);
-List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(crit);
+List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);
 if (tunnelIds.size() <= 0)
 return null;
 else
@@ -375,7 +375,7 @@ public class OutboundClientMessageJob extends JobImpl {
 private void dieFatal() {
 if (_status.getSuccess()) return;
 boolean alreadyFailed = _status.failed();
-long sendTime = _context.clock().now() - _status.getStart();
+long sendTime = getContext().clock().now() - _status.getStart();
 ClientMessage msg = _status.getMessage();
 if (alreadyFailed) {
 if (_log.shouldLog(Log.DEBUG))
@@ -390,10 +390,10 @@ public class OutboundClientMessageJob extends JobImpl {
 new Exception("Message send failure"));
 }

-_context.messageHistory().sendPayloadMessage(msg.getMessageId().getMessageId(), false, sendTime);
-_context.clientManager().messageDeliveryStatusUpdate(msg.getFromDestination(), msg.getMessageId(), false);
-_context.statManager().updateFrequency("client.sendMessageFailFrequency");
-_context.statManager().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime);
+getContext().messageHistory().sendPayloadMessage(msg.getMessageId().getMessageId(), false, sendTime);
+getContext().clientManager().messageDeliveryStatusUpdate(msg.getFromDestination(), msg.getMessageId(), false);
+getContext().statManager().updateFrequency("client.sendMessageFailFrequency");
+getContext().statManager().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime);
 }

 /** build the payload clove that will be used for all of the messages, placing the clove in the status structure */
@@ -411,9 +411,9 @@ public class OutboundClientMessageJob extends JobImpl {
 clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
 clove.setDeliveryInstructions(instructions);
 clove.setExpiration(_overallExpiration);
-clove.setId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
+clove.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));

-DataMessage msg = new DataMessage(_context);
+DataMessage msg = new DataMessage(getContext());
 msg.setData(_status.getMessage().getPayload().getEncryptedData());

 clove.setPayload(msg);
@@ -450,7 +450,7 @@ public class OutboundClientMessageJob extends JobImpl {
 _failure = false;
 _numLookups = 0;
 _previousSent = 0;
-_start = _context.clock().now();
+_start = getContext().clock().now();
 }

 /** raw payload */
@@ -572,7 +572,7 @@ public class OutboundClientMessageJob extends JobImpl {
 /** queued by the db lookup success and the send timeout to get us to try the next lease */
 private class NextStepJob extends JobImpl {
 public NextStepJob() {
-super(OutboundClientMessageJob.this._context);
+super(OutboundClientMessageJob.this.getContext());
 }
 public String getName() { return "Process next step for outbound client message"; }
 public void runJob() { sendNext(); }
@@ -585,7 +585,7 @@ public class OutboundClientMessageJob extends JobImpl {
 */
 private class LookupLeaseSetFailedJob extends JobImpl {
 public LookupLeaseSetFailedJob() {
-super(OutboundClientMessageJob.this._context);
+super(OutboundClientMessageJob.this.getContext());
 }
 public String getName() { return "Lookup for outbound client message failed"; }
 public void runJob() {
@@ -597,7 +597,7 @@ public class OutboundClientMessageJob extends JobImpl {
 private class SendJob extends JobImpl {
 private Lease _lease;
 public SendJob(Lease lease) {
-super(OutboundClientMessageJob.this._context);
+super(OutboundClientMessageJob.this.getContext());
 _lease = lease;
 }
 public String getName() { return "Send outbound client message through the lease"; }
@@ -620,7 +620,7 @@ public class OutboundClientMessageJob extends JobImpl {
 *
 */
 public SendSuccessJob(Lease lease, SessionKey key, Set tags) {
-super(OutboundClientMessageJob.this._context);
+super(OutboundClientMessageJob.this.getContext());
 _lease = lease;
 _key = key;
 _tags = tags;
@@ -628,7 +628,7 @@ public class OutboundClientMessageJob extends JobImpl {

 public String getName() { return "Send client message successful to a lease"; }
 public void runJob() {
-long sendTime = _context.clock().now() - _status.getStart();
+long sendTime = getContext().clock().now() - _status.getStart();
 boolean alreadySuccessful = _status.success();
 MessageId msgId = _status.getMessage().getMessageId();
 if (_log.shouldLog(Log.INFO))
@@ -641,7 +641,7 @@ public class OutboundClientMessageJob extends JobImpl {
 if ( (_key != null) && (_tags != null) && (_tags.size() > 0) ) {
 LeaseSet ls = _status.getLeaseSet();
 if (ls != null)
-_context.sessionKeyManager().tagsDelivered(ls.getEncryptionKey(),
+getContext().sessionKeyManager().tagsDelivered(ls.getEncryptionKey(),
 _key, _tags);
 }

@@ -653,13 +653,13 @@ public class OutboundClientMessageJob extends JobImpl {
 return;
 }
 long dataMsgId = _status.getClove().getId();
-_context.messageHistory().sendPayloadMessage(dataMsgId, true, sendTime);
-_context.clientManager().messageDeliveryStatusUpdate(_status.getFrom(), msgId, true);
+getContext().messageHistory().sendPayloadMessage(dataMsgId, true, sendTime);
+getContext().clientManager().messageDeliveryStatusUpdate(_status.getFrom(), msgId, true);
 _lease.setNumSuccess(_lease.getNumSuccess()+1);

-_context.statManager().addRateData("client.sendAckTime", sendTime, 0);
-_context.statManager().addRateData("client.sendMessageSize", _status.getMessage().getPayload().getSize(), sendTime);
-_context.statManager().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime);
+getContext().statManager().addRateData("client.sendAckTime", sendTime, 0);
+getContext().statManager().addRateData("client.sendMessageSize", _status.getMessage().getPayload().getSize(), sendTime);
+getContext().statManager().addRateData("client.sendAttemptAverage", _status.getNumSent(), sendTime);
 }

 public void setMessage(I2NPMessage msg) {}
@@ -674,7 +674,7 @@ public class OutboundClientMessageJob extends JobImpl {
 private Lease _lease;

 public SendTimeoutJob(Lease lease) {
-super(OutboundClientMessageJob.this._context);
+super(OutboundClientMessageJob.this.getContext());
 _lease = lease;
 }
@ -76,20 +76,20 @@ public class SendGarlicJob extends JobImpl {
|
||||
public String getName() { return "Build Garlic Message"; }
|
||||
|
||||
public void runJob() {
|
||||
long before = _context.clock().now();
|
||||
_message = GarlicMessageBuilder.buildMessage(_context, _config, _wrappedKey, _wrappedTags);
|
||||
long after = _context.clock().now();
|
||||
long before = getContext().clock().now();
|
||||
_message = GarlicMessageBuilder.buildMessage(getContext(), _config, _wrappedKey, _wrappedTags);
|
||||
long after = getContext().clock().now();
|
||||
if ( (after - before) > 1000) {
|
||||
_log.warn("Building the garlic took too long [" + (after-before)+" ms]", getAddedBy());
|
||||
} else {
|
||||
_log.debug("Building the garlic was fast! " + (after - before) + " ms");
|
||||
}
|
||||
_context.jobQueue().addJob(new SendJob());
|
||||
getContext().jobQueue().addJob(new SendJob());
|
||||
}
|
||||
|
||||
private class SendJob extends JobImpl {
|
||||
public SendJob() {
|
||||
super(SendGarlicJob.this._context);
|
||||
super(SendGarlicJob.this.getContext());
|
||||
}
|
||||
public String getName() { return "Send Built Garlic Message"; }
|
||||
public void runJob() {
|
||||
@ -102,7 +102,7 @@ public class SendGarlicJob extends JobImpl {
|
||||
}
|
||||
|
||||
private void sendGarlic() {
|
||||
OutNetMessage msg = new OutNetMessage(_context);
|
||||
OutNetMessage msg = new OutNetMessage(getContext());
|
||||
long when = _message.getMessageExpiration().getTime(); // + Router.CLOCK_FUDGE_FACTOR;
|
||||
msg.setExpiration(when);
|
||||
msg.setMessage(_message);
|
||||
@ -116,7 +116,7 @@ public class SendGarlicJob extends JobImpl {
|
||||
//_log.info("Sending garlic message to [" + _config.getRecipient() + "] encrypted with " + _config.getRecipientPublicKey() + " or " + _config.getRecipient().getIdentity().getPublicKey());
|
||||
//_log.debug("Garlic config data:\n" + _config);
|
||||
//msg.setTarget(_target);
|
||||
_context.outNetMessagePool().add(msg);
|
||||
getContext().outNetMessagePool().add(msg);
|
||||
_log.debug("Garlic message added to outbound network message pool");
|
||||
}
|
||||
}
|
||||
|
@ -36,7 +36,7 @@ public class SendMessageAckJob extends JobImpl {
|
||||
}
|
||||
|
||||
public void runJob() {
|
||||
_context.jobQueue().addJob(new SendReplyMessageJob(_context, _block, createAckMessage(), ACK_PRIORITY));
|
||||
getContext().jobQueue().addJob(new SendReplyMessageJob(getContext(), _block, createAckMessage(), ACK_PRIORITY));
|
||||
}
|
||||
|
||||
/**
|
||||
@ -48,8 +48,8 @@ public class SendMessageAckJob extends JobImpl {
|
||||
*
|
||||
*/
|
||||
protected I2NPMessage createAckMessage() {
|
||||
DeliveryStatusMessage statusMessage = new DeliveryStatusMessage(_context);
|
||||
statusMessage.setArrival(new Date(_context.clock().now()));
|
||||
DeliveryStatusMessage statusMessage = new DeliveryStatusMessage(getContext());
|
||||
statusMessage.setArrival(new Date(getContext().clock().now()));
|
||||
statusMessage.setMessageId(_ackId);
|
||||
return statusMessage;
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ public class SendMessageDirectJob extends JobImpl {
|
||||
}
|
||||
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess, Job onFail, MessageSelector selector, long expiration, int priority) {
|
||||
super(ctx);
|
||||
_log = _context.logManager().getLog(SendMessageDirectJob.class);
|
||||
_log = getContext().logManager().getLog(SendMessageDirectJob.class);
|
||||
_message = message;
|
||||
_targetHash = toPeer;
|
||||
_router = null;
|
||||
@ -67,7 +67,7 @@ public class SendMessageDirectJob extends JobImpl {
|
||||
if (_targetHash == null)
|
||||
throw new IllegalArgumentException("Attempt to send a message to a null peer");
|
||||
_sent = false;
|
||||
long remaining = expiration - _context.clock().now();
|
||||
long remaining = expiration - getContext().clock().now();
|
||||
if (remaining < 50*1000) {
|
||||
_log.info("Sending message to expire in " + remaining + "ms containing " + message.getUniqueId() + " (a " + message.getClass().getName() + ")", new Exception("SendDirect from"));
|
||||
}
|
||||
@ -75,7 +75,7 @@ public class SendMessageDirectJob extends JobImpl {
|
||||
|
||||
public String getName() { return "Send Message Direct"; }
|
||||
public void runJob() {
|
||||
long now = _context.clock().now();
|
||||
long now = getContext().clock().now();
|
||||
if (_expiration == 0)
|
||||
_expiration = now + DEFAULT_TIMEOUT;
|
||||
|
||||
@ -95,7 +95,7 @@ public class SendMessageDirectJob extends JobImpl {
|
||||
_log.debug("Router specified, sending");
|
||||
send();
|
||||
} else {
|
||||
_router = _context.netDb().lookupRouterInfoLocally(_targetHash);
|
||||
_router = getContext().netDb().lookupRouterInfoLocally(_targetHash);
|
||||
if (_router != null) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Router not specified but lookup found it");
|
||||
@ -104,14 +104,14 @@ public class SendMessageDirectJob extends JobImpl {
|
||||
if (!_alreadySearched) {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Router not specified, so we're looking for it...");
|
||||
_context.netDb().lookupRouterInfo(_targetHash, this, this,
|
||||
_expiration - _context.clock().now());
|
||||
_searchOn = _context.clock().now();
|
||||
getContext().netDb().lookupRouterInfo(_targetHash, this, this,
|
||||
_expiration - getContext().clock().now());
|
||||
_searchOn = getContext().clock().now();
|
||||
_alreadySearched = true;
|
||||
} else {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn("Unable to find the router to send to: " + _targetHash
|
||||
+ " after searching for " + (_context.clock().now()-_searchOn)
|
||||
+ " after searching for " + (getContext().clock().now()-_searchOn)
|
||||
+ "ms, message: " + _message, getAddedBy());
|
||||
}
|
||||
}
|
||||
@ -126,10 +126,10 @@ public class SendMessageDirectJob extends JobImpl {
|
||||
}
|
||||
_sent = true;
|
||||
Hash to = _router.getIdentity().getHash();
|
||||
Hash us = _context.router().getRouterInfo().getIdentity().getHash();
|
||||
Hash us = getContext().router().getRouterInfo().getIdentity().getHash();
|
||||
if (us.equals(to)) {
|
||||
if (_selector != null) {
|
||||
OutNetMessage outM = new OutNetMessage(_context);
|
||||
OutNetMessage outM = new OutNetMessage(getContext());
|
||||
outM.setExpiration(_expiration);
|
||||
outM.setMessage(_message);
|
||||
outM.setOnFailedReplyJob(_onFail);
|
||||
@ -139,23 +139,23 @@ public class SendMessageDirectJob extends JobImpl {
|
||||
outM.setPriority(_priority);
|
||||
outM.setReplySelector(_selector);
|
||||
outM.setTarget(_router);
|
||||
_context.messageRegistry().registerPending(outM);
|
||||
getContext().messageRegistry().registerPending(outM);
|
||||
}
|
||||
|
||||
if (_onSend != null)
|
||||
_context.jobQueue().addJob(_onSend);
|
||||
getContext().jobQueue().addJob(_onSend);
|
||||
|
||||
InNetMessage msg = new InNetMessage(_context);
|
||||
InNetMessage msg = new InNetMessage(getContext());
|
||||
msg.setFromRouter(_router.getIdentity());
|
||||
msg.setMessage(_message);
|
||||
_context.inNetMessagePool().add(msg);
|
||||
getContext().inNetMessagePool().add(msg);
|
||||
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Adding " + _message.getClass().getName()
|
||||
+ " to inbound message pool as it was destined for ourselves");
|
||||
//_log.debug("debug", _createdBy);
|
||||
} else {
|
||||
OutNetMessage msg = new OutNetMessage(_context);
|
||||
OutNetMessage msg = new OutNetMessage(getContext());
|
||||
msg.setExpiration(_expiration);
|
||||
msg.setMessage(_message);
|
||||
msg.setOnFailedReplyJob(_onFail);
|
||||
@ -165,7 +165,7 @@ public class SendMessageDirectJob extends JobImpl {
|
||||
msg.setPriority(_priority);
|
||||
msg.setReplySelector(_selector);
|
||||
msg.setTarget(_router);
|
||||
_context.outNetMessagePool().add(msg);
|
||||
getContext().outNetMessagePool().add(msg);
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Adding " + _message.getClass().getName()
|
||||
+ " to outbound message pool targeting "
|
||||
|
@ -37,7 +37,7 @@ public class SendReplyMessageJob extends JobImpl {
}

public void runJob() {
SourceRouteReplyMessage msg = new SourceRouteReplyMessage(_context);
SourceRouteReplyMessage msg = new SourceRouteReplyMessage(getContext());
msg.setMessage(_message);
msg.setEncryptedHeader(_block.getData());
msg.setMessageExpiration(_message.getMessageExpiration());
@ -56,8 +56,8 @@ public class SendReplyMessageJob extends JobImpl {
*/
protected void send(I2NPMessage msg) {
_log.info("Sending reply with " + _message.getClass().getName() + " in a sourceRouteeplyMessage to " + _block.getRouter().toBase64());
SendMessageDirectJob j = new SendMessageDirectJob(_context, msg, _block.getRouter(), _priority);
_context.jobQueue().addJob(j);
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), msg, _block.getRouter(), _priority);
getContext().jobQueue().addJob(j);
}

public String getName() { return "Send Reply Message"; }

@ -83,11 +83,11 @@ public class SendTunnelMessageJob extends JobImpl {
new Exception("SendTunnel from"));
}
//_log.info("Send tunnel message " + msg.getClass().getName() + " to " + _destRouter + " over " + _tunnelId + " targetting tunnel " + _targetTunnelId, new Exception("SendTunnel from"));
_expiration = _context.clock().now() + timeoutMs;
_expiration = getContext().clock().now() + timeoutMs;
}

public void runJob() {
TunnelInfo info = _context.tunnelManager().getTunnelInfo(_tunnelId);
TunnelInfo info = getContext().tunnelManager().getTunnelInfo(_tunnelId);
if (info == null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Message for unknown tunnel [" + _tunnelId
@ -124,21 +124,21 @@ public class SendTunnelMessageJob extends JobImpl {
*
*/
private void forwardToGateway() {
TunnelMessage msg = new TunnelMessage(_context);
TunnelMessage msg = new TunnelMessage(getContext());
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
_message.writeBytes(baos);
msg.setData(baos.toByteArray());
msg.setTunnelId(_tunnelId);
msg.setMessageExpiration(new Date(_expiration));
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg,
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg,
_destRouter, _onSend,
_onReply, _onFailure,
_selector, _expiration,
_priority));

String bodyType = _message.getClass().getName();
_context.messageHistory().wrap(bodyType, _message.getUniqueId(),
getContext().messageHistory().wrap(bodyType, _message.getUniqueId(),
TunnelMessage.class.getName(), msg.getUniqueId());
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
@ -162,7 +162,7 @@ public class SendTunnelMessageJob extends JobImpl {
if (_log.shouldLog(Log.ERROR))
_log.error("We are not participating in this /known/ tunnel - was the router reset?");
if (_onFailure != null)
_context.jobQueue().addJob(_onFailure);
getContext().jobQueue().addJob(_onFailure);
} else {
// we're the gateway, so sign, encrypt, and forward to info.getNextHop()
TunnelMessage msg = prepareMessage(info);
@ -170,20 +170,20 @@ public class SendTunnelMessageJob extends JobImpl {
if (_log.shouldLog(Log.ERROR))
_log.error("wtf, unable to prepare a tunnel message to the next hop, when we're the gateway and hops remain? tunnel: " + info);
if (_onFailure != null)
_context.jobQueue().addJob(_onFailure);
getContext().jobQueue().addJob(_onFailure);
return;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Tunnel message created: " + msg + " out of encrypted message: "
+ _message);
long now = _context.clock().now();
long now = getContext().clock().now();
if (_expiration < now + 15*1000) {
if (_log.shouldLog(Log.WARN))
_log.warn("Adding a tunnel message that will expire shortly ["
+ new Date(_expiration) + "]", getAddedBy());
}
msg.setMessageExpiration(new Date(_expiration));
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg,
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg,
info.getNextHop(), _onSend,
_onReply, _onFailure,
_selector, _expiration,
@ -205,7 +205,7 @@ public class SendTunnelMessageJob extends JobImpl {
if (_log.shouldLog(Log.ERROR))
_log.error("Cannot inject non-tunnel messages as a participant!" + _message, getAddedBy());
if (_onFailure != null)
_context.jobQueue().addJob(_onFailure);
getContext().jobQueue().addJob(_onFailure);
return;
}

@ -216,29 +216,29 @@ public class SendTunnelMessageJob extends JobImpl {
if (_log.shouldLog(Log.ERROR))
_log.error("No verification key for the participant? tunnel: " + info, getAddedBy());
if (_onFailure != null)
_context.jobQueue().addJob(_onFailure);
getContext().jobQueue().addJob(_onFailure);
return;
}

boolean ok = struct.verifySignature(_context, info.getVerificationKey().getKey());
boolean ok = struct.verifySignature(getContext(), info.getVerificationKey().getKey());
if (!ok) {
if (_log.shouldLog(Log.WARN))
_log.warn("Failed tunnel verification! Spoofing / tagging attack? " + _message, getAddedBy());
if (_onFailure != null)
_context.jobQueue().addJob(_onFailure);
getContext().jobQueue().addJob(_onFailure);
return;
} else {
if (info.getNextHop() != null) {
if (_log.shouldLog(Log.INFO))
_log.info("Message for tunnel " + info.getTunnelId().getTunnelId() + " received where we're not the gateway and there are remaining hops, so forward it on to "
+ info.getNextHop().toBase64() + " via SendMessageDirectJob");
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, info.getNextHop(), _onSend, null, _onFailure, null, _message.getMessageExpiration().getTime(), _priority));
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, info.getNextHop(), _onSend, null, _onFailure, null, _message.getMessageExpiration().getTime(), _priority));
return;
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Should not be reached - participant, but no more hops?!");
if (_onFailure != null)
_context.jobQueue().addJob(_onFailure);
getContext().jobQueue().addJob(_onFailure);
return;
}
}
@ -247,7 +247,7 @@ public class SendTunnelMessageJob extends JobImpl {

/** find our place in the tunnel */
private TunnelInfo getUs(TunnelInfo info) {
Hash us = _context.routerHash();
Hash us = getContext().routerHash();
TunnelInfo lastUs = null;
while (info != null) {
if (us.equals(info.getThisHop()))
@ -277,9 +277,9 @@ public class SendTunnelMessageJob extends JobImpl {
*
*/
private TunnelMessage prepareMessage(TunnelInfo info) {
TunnelMessage msg = new TunnelMessage(_context);
TunnelMessage msg = new TunnelMessage(getContext());

SessionKey key = _context.keyGenerator().generateSessionKey();
SessionKey key = getContext().keyGenerator().generateSessionKey();

DeliveryInstructions instructions = new DeliveryInstructions();
instructions.setDelayRequested(false);
@ -329,7 +329,7 @@ public class SendTunnelMessageJob extends JobImpl {
TunnelVerificationStructure verification = createVerificationStructure(encryptedMessage, info);

String bodyType = _message.getClass().getName();
_context.messageHistory().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
getContext().messageHistory().wrap(bodyType, _message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());

if (_log.shouldLog(Log.DEBUG))
_log.debug("Tunnel message prepared: instructions = " + instructions);
@ -347,8 +347,8 @@ public class SendTunnelMessageJob extends JobImpl {
*/
private TunnelVerificationStructure createVerificationStructure(byte encryptedMessage[], TunnelInfo info) {
TunnelVerificationStructure struct = new TunnelVerificationStructure();
struct.setMessageHash(_context.sha().calculateHash(encryptedMessage));
struct.sign(_context, info.getSigningKey().getKey());
struct.setMessageHash(getContext().sha().calculateHash(encryptedMessage));
struct.sign(getContext(), info.getSigningKey().getKey());
return struct;
}

@ -363,9 +363,9 @@ public class SendTunnelMessageJob extends JobImpl {
struct.writeBytes(baos);

byte iv[] = new byte[16];
Hash h = _context.sha().calculateHash(key.getData());
Hash h = getContext().sha().calculateHash(key.getData());
System.arraycopy(h.getData(), 0, iv, 0, iv.length);
return _context.AESEngine().safeEncrypt(baos.toByteArray(), key, iv, paddedSize);
return getContext().AESEngine().safeEncrypt(baos.toByteArray(), key, iv, paddedSize);
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out data to encrypt", ioe);
@ -389,12 +389,12 @@ public class SendTunnelMessageJob extends JobImpl {
if (_onSend != null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Firing onSend as we're honoring the instructions");
_context.jobQueue().addJob(_onSend);
getContext().jobQueue().addJob(_onSend);
}

// since we are the gateway, we don't need to decrypt the delivery instructions or the payload

RouterIdentity ident = _context.router().getRouterInfo().getIdentity();
RouterIdentity ident = getContext().router().getRouterInfo().getIdentity();

if (_destRouter != null) {
honorSendRemote(info, ident);
@ -416,7 +416,7 @@ public class SendTunnelMessageJob extends JobImpl {
+ " message off to remote tunnel "
+ _targetTunnelId.getTunnelId() + " on router "
+ _destRouter.toBase64());
TunnelMessage tmsg = new TunnelMessage(_context);
TunnelMessage tmsg = new TunnelMessage(getContext());
tmsg.setEncryptedDeliveryInstructions(null);
tmsg.setTunnelId(_targetTunnelId);
tmsg.setVerificationStructure(null);
@ -438,7 +438,7 @@ public class SendTunnelMessageJob extends JobImpl {
+ " message off to remote router " + _destRouter.toBase64());
msg = _message;
}
long now = _context.clock().now();
long now = getContext().clock().now();
//if (_expiration < now) {
//_expiration = now + Router.CLOCK_FUDGE_FACTOR;
//_log.info("Fudging the message send so it expires in the fudge factor...");
@ -451,11 +451,11 @@ public class SendTunnelMessageJob extends JobImpl {
}

String bodyType = _message.getClass().getName();
_context.messageHistory().wrap(bodyType, _message.getUniqueId(),
getContext().messageHistory().wrap(bodyType, _message.getUniqueId(),
TunnelMessage.class.getName(), msg.getUniqueId());

// don't specify a selector, since createFakeOutNetMessage already does that
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, _destRouter,
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, _destRouter,
_onSend, _onReply, _onFailure,
null, _expiration, _priority));
}
@ -471,22 +471,22 @@ public class SendTunnelMessageJob extends JobImpl {
// its a network message targeting us...
if (_log.shouldLog(Log.DEBUG))
_log.debug("Destination is null or its not a DataMessage - pass it off to the InNetMessagePool");
InNetMessage msg = new InNetMessage(_context);
InNetMessage msg = new InNetMessage(getContext());
msg.setFromRouter(ident);
msg.setFromRouterHash(ident.getHash());
msg.setMessage(_message);
msg.setReplyBlock(null);
_context.inNetMessagePool().add(msg);
getContext().inNetMessagePool().add(msg);
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Destination is not null and it is a DataMessage - pop it into the ClientMessagePool");
DataMessage msg = (DataMessage)_message;
boolean valid = _context.messageValidator().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime());
boolean valid = getContext().messageValidator().validateMessage(msg.getUniqueId(), msg.getMessageExpiration().getTime());
if (!valid) {
if (_log.shouldLog(Log.WARN))
_log.warn("Duplicate data message received [" + msg.getUniqueId() + " expiring on " + msg.getMessageExpiration() + "]");
_context.messageHistory().droppedOtherMessage(msg);
_context.messageHistory().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(), "Duplicate");
getContext().messageHistory().droppedOtherMessage(msg);
getContext().messageHistory().messageProcessingError(msg.getUniqueId(), msg.getClass().getName(), "Duplicate");
return;
}

@ -501,8 +501,8 @@ public class SendTunnelMessageJob extends JobImpl {
clientMessage.setDestination(info.getDestination());
clientMessage.setPayload(payload);
clientMessage.setReceptionInfo(receptionInfo);
_context.clientMessagePool().add(clientMessage);
_context.messageHistory().receivePayloadMessage(msg.getUniqueId());
getContext().clientMessagePool().add(clientMessage);
getContext().messageHistory().receivePayloadMessage(msg.getUniqueId());
}
}

@ -510,7 +510,7 @@ public class SendTunnelMessageJob extends JobImpl {
// now we create a fake outNetMessage to go onto the registry so we can select
if (_log.shouldLog(Log.DEBUG))
_log.debug("Registering a fake outNetMessage for the message tunneled locally since we have a selector");
OutNetMessage outM = new OutNetMessage(_context);
OutNetMessage outM = new OutNetMessage(getContext());
outM.setExpiration(_expiration);
outM.setMessage(_message);
outM.setOnFailedReplyJob(_onFailure);
@ -520,7 +520,7 @@ public class SendTunnelMessageJob extends JobImpl {
outM.setPriority(_priority);
outM.setReplySelector(_selector);
outM.setTarget(null);
_context.messageRegistry().registerPending(outM);
getContext().messageRegistry().registerPending(outM);
// we dont really need the data
outM.discardData();
}

@ -49,9 +49,9 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {

public HandleDatabaseLookupMessageJob(RouterContext ctx, DatabaseLookupMessage receivedMessage, RouterIdentity from, Hash fromHash) {
super(ctx);
_log = _context.logManager().getLog(HandleDatabaseLookupMessageJob.class);
_context.statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_log = getContext().logManager().getLog(HandleDatabaseLookupMessageJob.class);
getContext().statManager().createRateStat("netDb.lookupsHandled", "How many netDb lookups have we handled?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.lookupsMatched", "How many netDb lookups did we have the data for?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_message = receivedMessage;
_from = from;
_fromHash = fromHash;
@ -70,14 +70,14 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
}

// might as well grab what they sent us
_context.netDb().store(fromKey, _message.getFrom());
getContext().netDb().store(fromKey, _message.getFrom());

// whatdotheywant?
handleRequest(fromKey);
}

private void handleRequest(Hash fromKey) {
LeaseSet ls = _context.netDb().lookupLeaseSetLocally(_message.getSearchKey());
LeaseSet ls = getContext().netDb().lookupLeaseSetLocally(_message.getSearchKey());
if (ls != null) {
// send that lease set to the _message.getFromHash peer
if (_log.shouldLog(Log.DEBUG))
@ -85,7 +85,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
+ " locally as a lease set. sending to " + fromKey.toBase64());
sendData(_message.getSearchKey(), ls, fromKey, _message.getReplyTunnel());
} else {
RouterInfo info = _context.netDb().lookupRouterInfoLocally(_message.getSearchKey());
RouterInfo info = getContext().netDb().lookupRouterInfoLocally(_message.getSearchKey());
if (info != null) {
// send that routerInfo to the _message.getFromHash peer
if (_log.shouldLog(Log.DEBUG))
@ -94,7 +94,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
sendData(_message.getSearchKey(), info, fromKey, _message.getReplyTunnel());
} else {
// not found locally - return closest peer routerInfo structs
Set routerInfoSet = _context.netDb().findNearestRouters(_message.getSearchKey(),
Set routerInfoSet = getContext().netDb().findNearestRouters(_message.getSearchKey(),
MAX_ROUTERS_RETURNED,
_message.getDontIncludePeers());
if (_log.shouldLog(Log.DEBUG))
@ -109,7 +109,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending data matching key key " + key.toBase64() + " to peer " + toPeer.toBase64()
+ " tunnel " + replyTunnel);
DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(key);
if (data instanceof LeaseSet) {
msg.setLeaseSet((LeaseSet)data);
@ -118,8 +118,8 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
msg.setRouterInfo((RouterInfo)data);
msg.setValueType(DatabaseStoreMessage.KEY_TYPE_ROUTERINFO);
}
_context.statManager().addRateData("netDb.lookupsMatched", 1, 0);
_context.statManager().addRateData("netDb.lookupsHandled", 1, 0);
getContext().statManager().addRateData("netDb.lookupsMatched", 1, 0);
getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0);
sendMessage(msg, toPeer, replyTunnel);
}

@ -127,15 +127,15 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending closest routers to key " + key.toBase64() + ": # peers = "
+ routerInfoSet.size() + " tunnel " + replyTunnel);
DatabaseSearchReplyMessage msg = new DatabaseSearchReplyMessage(_context);
msg.setFromHash(_context.router().getRouterInfo().getIdentity().getHash());
DatabaseSearchReplyMessage msg = new DatabaseSearchReplyMessage(getContext());
msg.setFromHash(getContext().router().getRouterInfo().getIdentity().getHash());
msg.setSearchKey(key);
if (routerInfoSet.size() <= 0) {
// always include something, so lets toss ourselves in there
routerInfoSet.add(_context.router().getRouterInfo());
routerInfoSet.add(getContext().router().getRouterInfo());
}
msg.addReplies(routerInfoSet);
_context.statManager().addRateData("netDb.lookupsHandled", 1, 0);
getContext().statManager().addRateData("netDb.lookupsHandled", 1, 0);
sendMessage(msg, toPeer, replyTunnel); // should this go via garlic messages instead?
}

@ -146,21 +146,21 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending reply directly to " + toPeer);
send = new SendMessageDirectJob(_context, message, toPeer, REPLY_TIMEOUT+_context.clock().now(), MESSAGE_PRIORITY);
send = new SendMessageDirectJob(getContext(), message, toPeer, REPLY_TIMEOUT+getContext().clock().now(), MESSAGE_PRIORITY);
}

_context.netDb().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT);
getContext().netDb().lookupRouterInfo(toPeer, send, null, REPLY_TIMEOUT);
}

private void sendThroughTunnel(I2NPMessage message, Hash toPeer, TunnelId replyTunnel) {
TunnelInfo info = _context.tunnelManager().getTunnelInfo(replyTunnel);
TunnelInfo info = getContext().tunnelManager().getTunnelInfo(replyTunnel);

// the sendTunnelMessageJob can't handle injecting into the tunnel anywhere but the beginning
// (and if we are the beginning, we have the signing key)
if ( (info == null) || (info.getSigningKey() != null)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending reply through " + replyTunnel + " on " + toPeer);
_context.jobQueue().addJob(new SendTunnelMessageJob(_context, message, replyTunnel, toPeer, null, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
getContext().jobQueue().addJob(new SendTunnelMessageJob(getContext(), message, replyTunnel, toPeer, null, null, null, null, null, REPLY_TIMEOUT, MESSAGE_PRIORITY));
} else {
// its a tunnel we're participating in, but we're NOT the gateway, so
sendToGateway(message, toPeer, replyTunnel, info);
@ -177,19 +177,19 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
return;
}

long expiration = REPLY_TIMEOUT + _context.clock().now();
long expiration = REPLY_TIMEOUT + getContext().clock().now();

TunnelMessage msg = new TunnelMessage(_context);
TunnelMessage msg = new TunnelMessage(getContext());
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
message.writeBytes(baos);
msg.setData(baos.toByteArray());
msg.setTunnelId(replyTunnel);
msg.setMessageExpiration(new Date(expiration));
_context.jobQueue().addJob(new SendMessageDirectJob(_context, msg, toPeer, null, null, null, null, expiration, MESSAGE_PRIORITY));
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), msg, toPeer, null, null, null, null, expiration, MESSAGE_PRIORITY));

String bodyType = message.getClass().getName();
_context.messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
getContext().messageHistory().wrap(bodyType, message.getUniqueId(), TunnelMessage.class.getName(), msg.getUniqueId());
} catch (IOException ioe) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error writing out the tunnel message to send to the tunnel", ioe);
@ -202,7 +202,7 @@ public class HandleDatabaseLookupMessageJob extends JobImpl {
public String getName() { return "Handle Database Lookup Message"; }

public void dropped() {
_context.messageHistory().messageProcessingError(_message.getUniqueId(),
getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Dropped due to overload");
}

@ -38,7 +38,7 @@ public class HandleDatabaseSearchReplyMessageJob extends JobImpl {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Handling database search reply message for key " + _message.getSearchKey().toBase64() + " with " + _message.getNumReplies() + " replies");
if (_message.getNumReplies() > 0)
_context.jobQueue().addJob(new HandlePeerJob(0));
getContext().jobQueue().addJob(new HandlePeerJob(0));
}

/**
@ -49,7 +49,7 @@ public class HandleDatabaseSearchReplyMessageJob extends JobImpl {
private final class HandlePeerJob extends JobImpl {
private int _curReply;
public HandlePeerJob(int reply) {
super(HandleDatabaseSearchReplyMessageJob.this._context);
super(HandleDatabaseSearchReplyMessageJob.this.getContext());
_curReply = reply;
}
public void runJob() {
@ -63,7 +63,7 @@ public class HandleDatabaseSearchReplyMessageJob extends JobImpl {
if (_log.shouldLog(Log.INFO))
_log.info("On search for " + _message.getSearchKey().toBase64() + ", received " + info.getIdentity().getHash().toBase64());

HandlePeerJob.this._context.netDb().store(info.getIdentity().getHash(), info);
HandlePeerJob.this.getContext().netDb().store(info.getIdentity().getHash(), info);
_curReply++;
return _message.getNumReplies() > _curReply;
}

@ -50,15 +50,15 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {

boolean wasNew = false;
if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_LEASESET) {
Object match = _context.netDb().store(_message.getKey(), _message.getLeaseSet());
Object match = getContext().netDb().store(_message.getKey(), _message.getLeaseSet());
wasNew = (null == match);
} else if (_message.getValueType() == DatabaseStoreMessage.KEY_TYPE_ROUTERINFO) {
if (_log.shouldLog(Log.INFO))
_log.info("Handling dbStore of router " + _message.getKey() + " with publishDate of "
+ new Date(_message.getRouterInfo().getPublished()));
Object match = _context.netDb().store(_message.getKey(), _message.getRouterInfo());
Object match = getContext().netDb().store(_message.getKey(), _message.getRouterInfo());
wasNew = (null == match);
_context.profileManager().heardAbout(_message.getKey());
getContext().profileManager().heardAbout(_message.getKey());
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("Invalid DatabaseStoreMessage data type - " + _message.getValueType()
@ -71,16 +71,16 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
if (_from != null)
_fromHash = _from.getHash();
if (_fromHash != null)
_context.profileManager().dbStoreReceived(_fromHash, wasNew);
_context.statManager().addRateData("netDb.storeHandled", 1, 0);
getContext().profileManager().dbStoreReceived(_fromHash, wasNew);
getContext().statManager().addRateData("netDb.storeHandled", 1, 0);
}

private void sendAck() {
DeliveryStatusMessage msg = new DeliveryStatusMessage(_context);
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
msg.setMessageId(_message.getReplyToken());
msg.setArrival(new Date(_context.clock().now()));
msg.setArrival(new Date(getContext().clock().now()));
TunnelId outTunnelId = selectOutboundTunnel();
_context.jobQueue().addJob(new SendTunnelMessageJob(_context, msg, outTunnelId,
getContext().jobQueue().addJob(new SendTunnelMessageJob(getContext(), msg, outTunnelId,
_message.getReplyGateway(), _message.getReplyTunnel(),
null, null, null, null, ACK_TIMEOUT, ACK_PRIORITY));
}
@ -92,7 +92,7 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
criteria.setReliabilityPriority(20);
criteria.setMaximumTunnelsRequired(1);
criteria.setMinimumTunnelsRequired(1);
List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(criteria);
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(criteria);
if (tunnelIds.size() <= 0) {
_log.error("No outbound tunnels?!");
return null;
@ -104,6 +104,6 @@ public class HandleDatabaseStoreMessageJob extends JobImpl {
public String getName() { return "Handle Database Store Message"; }

public void dropped() {
_context.messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
getContext().messageHistory().messageProcessingError(_message.getUniqueId(), _message.getClass().getName(), "Dropped due to overload");
}
}

@ -32,25 +32,25 @@ public class PublishLocalRouterInfoJob extends JobImpl {

public String getName() { return "Publish Local Router Info"; }
public void runJob() {
RouterInfo ri = new RouterInfo(_context.router().getRouterInfo());
RouterInfo ri = new RouterInfo(getContext().router().getRouterInfo());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Old routerInfo contains " + ri.getAddresses().size()
+ " addresses and " + ri.getOptions().size() + " options");
Properties stats = _context.statPublisher().publishStatistics();
Properties stats = getContext().statPublisher().publishStatistics();
try {
ri.setPublished(_context.clock().now());
ri.setPublished(getContext().clock().now());
ri.setOptions(stats);
ri.setAddresses(_context.commSystem().createAddresses());
ri.sign(_context.keyManager().getSigningPrivateKey());
_context.router().setRouterInfo(ri);
ri.setAddresses(getContext().commSystem().createAddresses());
ri.sign(getContext().keyManager().getSigningPrivateKey());
getContext().router().setRouterInfo(ri);
if (_log.shouldLog(Log.INFO))
_log.info("Newly updated routerInfo is published with " + stats.size()
+ "/" + ri.getOptions().size() + " options on "
+ new Date(ri.getPublished()));
_context.netDb().publish(ri);
getContext().netDb().publish(ri);
} catch (DataFormatException dfe) {
_log.error("Error signing the updated local router info!", dfe);
}
requeue(PUBLISH_DELAY + _context.random().nextInt((int)PUBLISH_DELAY));
requeue(PUBLISH_DELAY + getContext().random().nextInt((int)PUBLISH_DELAY));
}
}

@ -55,8 +55,8 @@ class DataPublisherJob extends JobImpl {
new Exception("Publish expired lease?"));
}
}
StoreJob store = new StoreJob(_context, _facade, key, data, null, null, STORE_TIMEOUT);
_context.jobQueue().addJob(store);
StoreJob store = new StoreJob(getContext(), _facade, key, data, null, null, STORE_TIMEOUT);
getContext().jobQueue().addJob(store);
}
requeue(RERUN_DELAY_MS);
}

@ -118,7 +118,7 @@ class DataRepublishingSelectorJob extends JobImpl {
private long rankPublishNeed(Hash key, Long lastPublished) {
int bucket = _facade.getKBuckets().pickBucket(key);
long sendPeriod = (bucket+1) * RESEND_BUCKET_FACTOR;
long now = _context.clock().now();
long now = getContext().clock().now();
if (lastPublished.longValue() < now-sendPeriod) {
RouterInfo ri = _facade.lookupRouterInfoLocally(key);
if (ri != null) {
@ -158,7 +158,7 @@ class DataRepublishingSelectorJob extends JobImpl {
if (_facade.lookupRouterInfoLocally(key) != null) {
// randomize the chance of rebroadcast for leases if we haven't
// sent it within 5 minutes
int val = _context.random().nextInt(LEASE_REBROADCAST_PROBABILITY_SCALE);
int val = getContext().random().nextInt(LEASE_REBROADCAST_PROBABILITY_SCALE);
if (val <= LEASE_REBROADCAST_PROBABILITY) {
if (_log.shouldLog(Log.INFO))
_log.info("Randomized rebroadcast of leases tells us to send "

@ -69,7 +69,7 @@ class ExpireRoutersJob extends JobImpl {
private Set selectKeysToExpire() {
Set possible = getNotInUse();
Set expiring = new HashSet(16);
long earliestPublishDate = _context.clock().now() - EXPIRE_DELAY;
long earliestPublishDate = getContext().clock().now() - EXPIRE_DELAY;

for (Iterator iter = possible.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
@ -94,7 +94,7 @@ class ExpireRoutersJob extends JobImpl {
Set possible = new HashSet(16);
for (Iterator iter = _facade.getAllRouters().iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
if (!_context.tunnelManager().isInUse(peer)) {
if (!getContext().tunnelManager().isInUse(peer)) {
possible.add(peer);
} else {
if (_log.shouldLog(Log.DEBUG))

@ -68,7 +68,7 @@ class ExploreJob extends SearchJob {
* @param expiration when the search should stop
*/
protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, RouterInfo replyGateway, long expiration) {
DatabaseLookupMessage msg = new DatabaseLookupMessage(_context);
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext());
msg.setSearchKey(getState().getTarget());
msg.setFrom(replyGateway);
msg.setDontIncludePeers(getState().getAttempted());
@ -95,7 +95,7 @@ class ExploreJob extends SearchJob {
*
*/
protected DatabaseLookupMessage buildMessage(long expiration) {
return buildMessage(null, _context.router().getRouterInfo(), expiration);
return buildMessage(null, getContext().router().getRouterInfo(), expiration);
}

/** max # of concurrent searches */
@ -110,7 +110,7 @@ class ExploreJob extends SearchJob {
protected void newPeersFound(int numNewPeers) {
// who cares about how many new peers. well, maybe we do. but for now,
// we'll do the simplest thing that could possibly work.
_facade.setLastExploreNewDate(_context.clock().now());
_facade.setLastExploreNewDate(getContext().clock().now());
}

/*

@ -36,14 +36,14 @@ public class RepublishLeaseSetJob extends JobImpl {
public String getName() { return "Republish a local leaseSet"; }
public void runJob() {
try {
if (_context.clientManager().isLocal(_dest)) {
if (getContext().clientManager().isLocal(_dest)) {
LeaseSet ls = _facade.lookupLeaseSetLocally(_dest);
if (ls != null) {
_log.warn("Client " + _dest + " is local, so we're republishing it");
if (!ls.isCurrent(Router.CLOCK_FUDGE_FACTOR)) {
_log.warn("Not publishing a LOCAL lease that isn't current - " + _dest, new Exception("Publish expired LOCAL lease?"));
} else {
_context.jobQueue().addJob(new StoreJob(_context, _facade, _dest, ls, null, null, REPUBLISH_LEASESET_DELAY));
getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _dest, ls, null, null, REPUBLISH_LEASESET_DELAY));
}
} else {
_log.warn("Client " + _dest + " is local, but we can't find a valid LeaseSet? perhaps its being rebuilt?");

@ -71,21 +71,21 @@ class SearchJob extends JobImpl {
super(context);
if ( (key == null) || (key.getData() == null) )
throw new IllegalArgumentException("Search for null key? wtf");
_log = _context.logManager().getLog(SearchJob.class);
_log = getContext().logManager().getLog(SearchJob.class);
_facade = facade;
_state = new SearchState(_context, key);
_state = new SearchState(getContext(), key);
_onSuccess = onSuccess;
_onFailure = onFailure;
_timeoutMs = timeoutMs;
_keepStats = keepStats;
_isLease = isLease;
_peerSelector = new PeerSelector(_context);
_expiration = _context.clock().now() + timeoutMs;
_context.statManager().createRateStat("netDb.successTime", "How long a successful search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "Network Database", new long[] { 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
_peerSelector = new PeerSelector(getContext());
_expiration = getContext().clock().now() + timeoutMs;
getContext().statManager().createRateStat("netDb.successTime", "How long a successful search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "Network Database", new long[] { 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "Network Database", new long[] { 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
if (_log.shouldLog(Log.DEBUG))
_log.debug("Search (" + getClass().getName() + " for " + key.toBase64(), new Exception("Search enqueued by"));
}
@ -93,7 +93,7 @@ class SearchJob extends JobImpl {
public void runJob() {
if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Searching for " + _state.getTarget()); // , getAddedBy());
_context.statManager().addRateData("netDb.searchCount", 1, 0);
getContext().statManager().addRateData("netDb.searchCount", 1, 0);
searchNext();
}

@ -136,7 +136,7 @@ class SearchJob extends JobImpl {
private boolean isLocal() { return _facade.getDataStore().isKnown(_state.getTarget()); }

private boolean isExpired() {
return _context.clock().now() >= _expiration;
return getContext().clock().now() >= _expiration;
}

/** max # of concurrent searches */
@ -199,15 +199,15 @@ class SearchJob extends JobImpl {
private void requeuePending() {
if (_pendingRequeueJob == null)
_pendingRequeueJob = new RequeuePending();
long now = _context.clock().now();
long now = getContext().clock().now();
if (_pendingRequeueJob.getTiming().getStartAfter() < now)
_pendingRequeueJob.getTiming().setStartAfter(now+5*1000);
_context.jobQueue().addJob(_pendingRequeueJob);
getContext().jobQueue().addJob(_pendingRequeueJob);
}

private class RequeuePending extends JobImpl {
public RequeuePending() {
super(SearchJob.this._context);
super(SearchJob.this.getContext());
}
public String getName() { return "Requeue search with pending"; }
public void runJob() { searchNext(); }
@ -220,7 +220,7 @@ class SearchJob extends JobImpl {
* @return ordered list of Hash objects
*/
private List getClosestRouters(Hash key, int numClosest, Set alreadyChecked) {
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Current routing key for " + key + ": " + rkey);
return _peerSelector.selectNearestExplicit(rkey, numClosest, alreadyChecked, _facade.getKBuckets());
@ -231,7 +231,7 @@ class SearchJob extends JobImpl {
*
*/
protected void sendSearch(RouterInfo router) {
if (router.getIdentity().equals(_context.router().getRouterInfo().getIdentity())) {
if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) {
// don't search ourselves
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": Dont send search to ourselves - why did we try?");
@ -257,26 +257,26 @@ class SearchJob extends JobImpl {
TunnelId inTunnelId = getInboundTunnelId();
if (inTunnelId == null) {
_log.error("No tunnels to get search replies through! wtf!");
_context.jobQueue().addJob(new FailedJob(router));
getContext().jobQueue().addJob(new FailedJob(router));
return;
}

TunnelInfo inTunnel = _context.tunnelManager().getTunnelInfo(inTunnelId);
RouterInfo inGateway = _context.netDb().lookupRouterInfoLocally(inTunnel.getThisHop());
TunnelInfo inTunnel = getContext().tunnelManager().getTunnelInfo(inTunnelId);
RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getThisHop());
if (inGateway == null) {
_log.error("We can't find the gateway to our inbound tunnel?! wtf");
_context.jobQueue().addJob(new FailedJob(router));
getContext().jobQueue().addJob(new FailedJob(router));
return;
}

long expiration = _context.clock().now() + PER_PEER_TIMEOUT; // getTimeoutMs();
long expiration = getContext().clock().now() + PER_PEER_TIMEOUT; // getTimeoutMs();

DatabaseLookupMessage msg = buildMessage(inTunnelId, inGateway, expiration);

TunnelId outTunnelId = getOutboundTunnelId();
if (outTunnelId == null) {
_log.error("No tunnels to send search out through! wtf!");
_context.jobQueue().addJob(new FailedJob(router));
getContext().jobQueue().addJob(new FailedJob(router));
return;
}

@ -286,18 +286,18 @@ class SearchJob extends JobImpl {
+ msg.getFrom().getIdentity().getHash().toBase64() + "] via tunnel ["
+ msg.getReplyTunnel() + "]");

SearchMessageSelector sel = new SearchMessageSelector(_context, router, _expiration, _state);
SearchMessageSelector sel = new SearchMessageSelector(getContext(), router, _expiration, _state);
long timeoutMs = PER_PEER_TIMEOUT; // getTimeoutMs();
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(_context, router, _state, _facade, this);
SendTunnelMessageJob j = new SendTunnelMessageJob(_context, msg, outTunnelId, router.getIdentity().getHash(),
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade, this);
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId, router.getIdentity().getHash(),
null, null, reply, new FailedJob(router), sel,
timeoutMs, SEARCH_PRIORITY);
_context.jobQueue().addJob(j);
getContext().jobQueue().addJob(j);
}

/** we're searching for a router, so we can just send direct */
protected void sendRouterSearch(RouterInfo router) {
long expiration = _context.clock().now() + PER_PEER_TIMEOUT; // getTimeoutMs();
long expiration = getContext().clock().now() + PER_PEER_TIMEOUT; // getTimeoutMs();

DatabaseLookupMessage msg = buildMessage(expiration);

@ -305,12 +305,12 @@ class SearchJob extends JobImpl {
_log.info(getJobId() + ": Sending router search to " + router.getIdentity().getHash().toBase64()
+ " for " + msg.getSearchKey().toBase64() + " w/ replies to us ["
+ msg.getFrom().getIdentity().getHash().toBase64() + "]");
SearchMessageSelector sel = new SearchMessageSelector(_context, router, _expiration, _state);
SearchMessageSelector sel = new SearchMessageSelector(getContext(), router, _expiration, _state);
long timeoutMs = PER_PEER_TIMEOUT;
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(_context, router, _state, _facade, this);
SendMessageDirectJob j = new SendMessageDirectJob(_context, msg, router.getIdentity().getHash(),
SearchUpdateReplyFoundJob reply = new SearchUpdateReplyFoundJob(getContext(), router, _state, _facade, this);
SendMessageDirectJob j = new SendMessageDirectJob(getContext(), msg, router.getIdentity().getHash(),
reply, new FailedJob(router), sel, expiration, SEARCH_PRIORITY);
_context.jobQueue().addJob(j);
getContext().jobQueue().addJob(j);
}

/**
@ -322,7 +322,7 @@ class SearchJob extends JobImpl {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(crit);
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);
if (tunnelIds.size() <= 0) {
return null;
}
@ -339,7 +339,7 @@ class SearchJob extends JobImpl {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = _context.tunnelManager().selectInboundTunnelIds(crit);
List tunnelIds = getContext().tunnelManager().selectInboundTunnelIds(crit);
if (tunnelIds.size() <= 0) {
return null;
}
@ -354,7 +354,7 @@ class SearchJob extends JobImpl {
* @param expiration when the search should stop
*/
protected DatabaseLookupMessage buildMessage(TunnelId replyTunnelId, RouterInfo replyGateway, long expiration) {
DatabaseLookupMessage msg = new DatabaseLookupMessage(_context);
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext());
msg.setSearchKey(_state.getTarget());
msg.setFrom(replyGateway);
msg.setDontIncludePeers(_state.getAttempted());
@ -369,9 +369,9 @@ class SearchJob extends JobImpl {
*
*/
protected DatabaseLookupMessage buildMessage(long expiration) {
DatabaseLookupMessage msg = new DatabaseLookupMessage(_context);
DatabaseLookupMessage msg = new DatabaseLookupMessage(getContext());
msg.setSearchKey(_state.getTarget());
msg.setFrom(_context.router().getRouterInfo());
msg.setFrom(getContext().router().getRouterInfo());
msg.setDontIncludePeers(_state.getAttempted());
msg.setMessageExpiration(new Date(expiration));
msg.setReplyTunnel(null);
@ -381,7 +381,7 @@ class SearchJob extends JobImpl {
void replyFound(DatabaseSearchReplyMessage message, Hash peer) {
long duration = _state.replyFound(peer);
// this processing can take a while, so split 'er up
_context.jobQueue().addJob(new SearchReplyJob((DatabaseSearchReplyMessage)message, peer, duration));
getContext().jobQueue().addJob(new SearchReplyJob((DatabaseSearchReplyMessage)message, peer, duration));
}

/**
@ -403,7 +403,7 @@ class SearchJob extends JobImpl {
private int _duplicatePeers;
private long _duration;
public SearchReplyJob(DatabaseSearchReplyMessage message, Hash peer, long duration) {
super(SearchJob.this._context);
super(SearchJob.this.getContext());
_msg = message;
_peer = peer;
_curIndex = 0;
@ -415,7 +415,7 @@ class SearchJob extends JobImpl {
public String getName() { return "Process Reply for Kademlia Search"; }
public void runJob() {
if (_curIndex >= _msg.getNumReplies()) {
_context.profileManager().dbLookupReply(_peer, _newPeers, _seenPeers,
getContext().profileManager().dbLookupReply(_peer, _newPeers, _seenPeers,
_invalidPeers, _duplicatePeers, _duration);
if (_newPeers > 0)
newPeersFound(_newPeers);
@ -462,7 +462,7 @@ class SearchJob extends JobImpl {
*
*/
public FailedJob(RouterInfo peer, boolean penalizePeer) {
super(SearchJob.this._context);
super(SearchJob.this.getContext());
_penalizePeer = penalizePeer;
_peer = peer.getIdentity().getHash();
}
@ -471,12 +471,12 @@ class SearchJob extends JobImpl {
if (_penalizePeer) {
if (_log.shouldLog(Log.WARN))
_log.warn("Penalizing peer for timeout on search: " + _peer.toBase64());
_context.profileManager().dbLookupFailed(_peer);
getContext().profileManager().dbLookupFailed(_peer);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("NOT (!!) Penalizing peer for timeout on search: " + _peer.toBase64());
}
_context.statManager().addRateData("netDb.failedPeers", 1, 0);
getContext().statManager().addRateData("netDb.failedPeers", 1, 0);
searchNext();
}
public String getName() { return "Kademlia Search Failed"; }
@ -493,12 +493,12 @@ class SearchJob extends JobImpl {
_log.debug(getJobId() + ": State of successful search: " + _state);

if (_keepStats) {
long time = _context.clock().now() - _state.getWhenStarted();
_context.statManager().addRateData("netDb.successTime", time, 0);
_context.statManager().addRateData("netDb.successPeers", _state.getAttempted().size(), time);
long time = getContext().clock().now() - _state.getWhenStarted();
getContext().statManager().addRateData("netDb.successTime", time, 0);
getContext().statManager().addRateData("netDb.successPeers", _state.getAttempted().size(), time);
}
if (_onSuccess != null)
_context.jobQueue().addJob(_onSuccess);
getContext().jobQueue().addJob(_onSuccess);

resend();
}
@ -513,7 +513,7 @@ class SearchJob extends JobImpl {
if (ds == null)
ds = _facade.lookupRouterInfoLocally(_state.getTarget());
if (ds != null)
_context.jobQueue().addJob(new StoreJob(_context, _facade, _state.getTarget(),
getContext().jobQueue().addJob(new StoreJob(getContext(), _facade, _state.getTarget(),
ds, null, null, RESEND_TIMEOUT,
_state.getSuccessful()));
}
@ -528,11 +528,11 @@ class SearchJob extends JobImpl {
_log.debug(getJobId() + ": State of failed search: " + _state);

if (_keepStats) {
long time = _context.clock().now() - _state.getWhenStarted();
_context.statManager().addRateData("netDb.failedTime", time, 0);
long time = getContext().clock().now() - _state.getWhenStarted();
getContext().statManager().addRateData("netDb.failedTime", time, 0);
}
if (_onFailure != null)
_context.jobQueue().addJob(_onFailure);
getContext().jobQueue().addJob(_onFailure);
}

public String getName() { return "Kademlia NetDb Search"; }

@ -58,7 +58,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
_log.error(getJobId() + ": Unknown db store type?!@ " + msg.getValueType());
}

_context.profileManager().dbLookupSuccessful(_peer, timeToReply);
getContext().profileManager().dbLookupSuccessful(_peer, timeToReply);
} else if (_message instanceof DatabaseSearchReplyMessage) {
_job.replyFound((DatabaseSearchReplyMessage)_message, _peer);
} else {

@ -47,7 +47,7 @@ class StartExplorersJob extends JobImpl {
for (Iterator iter = toExplore.iterator(); iter.hasNext(); ) {
Hash key = (Hash)iter.next();
//_log.info("Starting explorer for " + key, new Exception("Exploring!"));
_context.jobQueue().addJob(new ExploreJob(_context, _facade, key));
getContext().jobQueue().addJob(new ExploreJob(getContext(), _facade, key));
}
long delay = getNextRunDelay();
if (_log.shouldLog(Log.DEBUG))
@ -63,12 +63,12 @@ class StartExplorersJob extends JobImpl {
long delay = getNextRunDelay();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Updating exploration schedule with a delay of " + delay);
getTiming().setStartAfter(_context.clock().now() + delay);
getTiming().setStartAfter(getContext().clock().now() + delay);
}

/** how long should we wait before exploring? */
private long getNextRunDelay() {
long delay = _context.clock().now() - _facade.getLastExploreNewDate();
long delay = getContext().clock().now() - _facade.getLastExploreNewDate();
if (delay < MIN_RERUN_DELAY_MS)
return MIN_RERUN_DELAY_MS;
else if (delay > MAX_RERUN_DELAY_MS)

@ -73,11 +73,11 @@ class StoreJob extends JobImpl {
DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set toSkip) {
super(context);
_log = context.logManager().getLog(StoreJob.class);
_context.statManager().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.storePeers", "How many peers each netDb must be sent to before success?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_context.statManager().createRateStat("netDb.ackTime", "How long does it take for a peer to ack a netDb store?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.storeSent", "How many netDb store messages have we sent?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.storePeers", "How many peers each netDb must be sent to before success?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
getContext().statManager().createRateStat("netDb.ackTime", "How long does it take for a peer to ack a netDb store?", "Network Database", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
_facade = facade;
_state = new StoreState(_context, key, data, toSkip);
_state = new StoreState(getContext(), key, data, toSkip);
_onSuccess = onSuccess;
_onFailure = onFailure;
_timeoutMs = timeoutMs;
@ -91,7 +91,7 @@ class StoreJob extends JobImpl {
}

private boolean isExpired() {
return _context.clock().now() >= _expiration;
return getContext().clock().now() >= _expiration;
}

/**
@ -168,7 +168,7 @@ class StoreJob extends JobImpl {
* @return ordered list of Hash objects
*/
private List getClosestRouters(Hash key, int numClosest, Set alreadyChecked) {
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
//if (_log.shouldLog(Log.DEBUG))
// _log.debug(getJobId() + ": Current routing key for " + key + ": " + rkey);

@ -181,7 +181,7 @@ class StoreJob extends JobImpl {
*
*/
private void sendStore(RouterInfo router) {
DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(_state.getTarget());
if (_state.getData() instanceof RouterInfo)
msg.setRouterInfo((RouterInfo)_state.getData());
@ -189,9 +189,9 @@ class StoreJob extends JobImpl {
msg.setLeaseSet((LeaseSet)_state.getData());
else
throw new IllegalArgumentException("Storing an unknown data type! " + _state.getData());
msg.setMessageExpiration(new Date(_context.clock().now() + _timeoutMs));
msg.setMessageExpiration(new Date(getContext().clock().now() + _timeoutMs));

if (router.getIdentity().equals(_context.router().getRouterInfo().getIdentity())) {
if (router.getIdentity().equals(getContext().router().getRouterInfo().getIdentity())) {
// don't send it to ourselves
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": Dont send store to ourselves - why did we try?");
@ -205,15 +205,15 @@ class StoreJob extends JobImpl {
}

private void sendStore(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
_context.statManager().addRateData("netDb.storeSent", 1, 0);
getContext().statManager().addRateData("netDb.storeSent", 1, 0);
sendStoreThroughGarlic(msg, peer, expiration);
}

private void sendStoreThroughGarlic(DatabaseStoreMessage msg, RouterInfo peer, long expiration) {
long token = _context.random().nextLong(I2NPMessage.MAX_ID_VALUE);
long token = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);

TunnelId replyTunnelId = selectInboundTunnel();
TunnelInfo replyTunnel = _context.tunnelManager().getTunnelInfo(replyTunnelId);
TunnelInfo replyTunnel = getContext().tunnelManager().getTunnelInfo(replyTunnelId);
if (replyTunnel == null) {
_log.error("No reply inbound tunnels available!");
return;
@ -229,7 +229,7 @@ class StoreJob extends JobImpl {

SendSuccessJob onReply = new SendSuccessJob(peer);
FailedJob onFail = new FailedJob(peer);
StoreMessageSelector selector = new StoreMessageSelector(_context, getJobId(), peer, token, expiration);
StoreMessageSelector selector = new StoreMessageSelector(getContext(), getJobId(), peer, token, expiration);

TunnelId outTunnelId = selectOutboundTunnel();
if (outTunnelId != null) {
@ -238,12 +238,12 @@ class StoreJob extends JobImpl {
// + peer.getIdentity().getHash().toBase64());
TunnelId targetTunnelId = null; // not needed
Job onSend = null; // not wanted
SendTunnelMessageJob j = new SendTunnelMessageJob(_context, msg, outTunnelId,
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId,
peer.getIdentity().getHash(),
targetTunnelId, onSend, onReply,
onFail, selector, STORE_TIMEOUT_MS,
STORE_PRIORITY);
_context.jobQueue().addJob(j);
getContext().jobQueue().addJob(j);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("No outbound tunnels to send a dbStore out!");
@ -258,7 +258,7 @@ class StoreJob extends JobImpl {
|
||||
criteria.setReliabilityPriority(20);
|
||||
criteria.setMaximumTunnelsRequired(1);
|
||||
criteria.setMinimumTunnelsRequired(1);
|
||||
List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(criteria);
|
||||
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(criteria);
|
||||
if (tunnelIds.size() <= 0) {
|
||||
_log.error("No outbound tunnels?!");
|
||||
return null;
|
||||
@ -274,7 +274,7 @@ class StoreJob extends JobImpl {
|
||||
criteria.setReliabilityPriority(20);
|
||||
criteria.setMaximumTunnelsRequired(1);
|
||||
criteria.setMinimumTunnelsRequired(1);
|
||||
List tunnelIds = _context.tunnelManager().selectInboundTunnelIds(criteria);
|
||||
List tunnelIds = getContext().tunnelManager().selectInboundTunnelIds(criteria);
|
||||
if (tunnelIds.size() <= 0) {
|
||||
_log.error("No inbound tunnels?!");
|
||||
return null;
|
||||
@ -292,7 +292,7 @@ class StoreJob extends JobImpl {
|
||||
private RouterInfo _peer;
|
||||
|
||||
public SendSuccessJob(RouterInfo peer) {
|
||||
super(StoreJob.this._context);
|
||||
super(StoreJob.this.getContext());
|
||||
_peer = peer;
|
||||
}
|
||||
|
||||
@ -302,8 +302,8 @@ class StoreJob extends JobImpl {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info(StoreJob.this.getJobId() + ": Marking store of " + _state.getTarget()
|
||||
+ " to " + _peer.getIdentity().getHash().toBase64() + " successful after " + howLong);
|
||||
_context.profileManager().dbStoreSent(_peer.getIdentity().getHash(), howLong);
|
||||
_context.statManager().addRateData("netDb.ackTime", howLong, howLong);
|
||||
getContext().profileManager().dbStoreSent(_peer.getIdentity().getHash(), howLong);
|
||||
getContext().statManager().addRateData("netDb.ackTime", howLong, howLong);
|
||||
|
||||
if (_state.getSuccessful().size() >= REDUNDANCY) {
|
||||
succeed();
|
||||
@ -326,14 +326,14 @@ class StoreJob extends JobImpl {
|
||||
private RouterInfo _peer;
|
||||
|
||||
public FailedJob(RouterInfo peer) {
|
||||
super(StoreJob.this._context);
|
||||
super(StoreJob.this.getContext());
|
||||
_peer = peer;
|
||||
}
|
||||
public void runJob() {
|
||||
if (_log.shouldLog(Log.WARN))
|
||||
_log.warn(StoreJob.this.getJobId() + ": Peer " + _peer.getIdentity().getHash().toBase64() + " timed out");
|
||||
_state.replyTimeout(_peer.getIdentity().getHash());
|
||||
_context.profileManager().dbStoreFailed(_peer.getIdentity().getHash());
|
||||
getContext().profileManager().dbStoreFailed(_peer.getIdentity().getHash());
|
||||
|
||||
sendNext();
|
||||
}
|
||||
@ -349,9 +349,9 @@ class StoreJob extends JobImpl {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": State of successful send: " + _state);
|
||||
if (_onSuccess != null)
|
||||
_context.jobQueue().addJob(_onSuccess);
|
||||
getContext().jobQueue().addJob(_onSuccess);
|
||||
_facade.noteKeySent(_state.getTarget());
|
||||
_context.statManager().addRateData("netDb.storePeers", _state.getAttempted().size(), _state.getWhenCompleted()-_state.getWhenStarted());
|
||||
getContext().statManager().addRateData("netDb.storePeers", _state.getAttempted().size(), _state.getWhenCompleted()-_state.getWhenStarted());
|
||||
}
|
||||
|
||||
/**
|
||||
@ -363,6 +363,6 @@ class StoreJob extends JobImpl {
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug(getJobId() + ": State of failed send: " + _state, new Exception("Who failed me?"));
|
||||
if (_onFailure != null)
|
||||
_context.jobQueue().addJob(_onFailure);
|
||||
getContext().jobQueue().addJob(_onFailure);
|
||||
}
|
||||
}
|
@ -27,18 +27,18 @@ class EvaluateProfilesJob extends JobImpl {
public String getName() { return "Evaluate peer profiles"; }
public void runJob() {
try {
long start = _context.clock().now();
Set allPeers = _context.profileOrganizer().selectAllPeers();
long afterSelect = _context.clock().now();
long start = getContext().clock().now();
Set allPeers = getContext().profileOrganizer().selectAllPeers();
long afterSelect = getContext().clock().now();
for (Iterator iter = allPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
PeerProfile profile = _context.profileOrganizer().getProfile(peer);
PeerProfile profile = getContext().profileOrganizer().getProfile(peer);
if (profile != null)
profile.coallesceStats();
}
long afterCoallesce = _context.clock().now();
_context.profileOrganizer().reorganize();
long afterReorganize = _context.clock().now();
long afterCoallesce = getContext().clock().now();
getContext().profileOrganizer().reorganize();
long afterReorganize = getContext().clock().now();

if (_log.shouldLog(Log.DEBUG))
_log.debug("Profiles coallesced and reorganized. total: " + allPeers.size() + ", selectAll: " + (afterSelect-start) + "ms, coallesce: " + (afterCoallesce-afterSelect) + "ms, reorganize: " + (afterReorganize-afterSelect));
@ -53,7 +53,7 @@ public class PeerTestJob extends JobImpl {
public void startTesting(PeerManager manager) {
_manager = manager;
_keepTesting = true;
_context.jobQueue().addJob(this);
getContext().jobQueue().addJob(this);
if (_log.shouldLog(Log.INFO))
_log.info("Start testing peers");
}
@ -97,7 +97,7 @@ public class PeerTestJob extends JobImpl {
Set peers = new HashSet(peerHashes.size());
for (Iterator iter = peerHashes.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next();
RouterInfo peerInfo = _context.netDb().lookupRouterInfoLocally(peer);
RouterInfo peerInfo = getContext().netDb().lookupRouterInfoLocally(peer);
if (peerInfo != null) {
peers.add(peerInfo);
} else {
@ -119,17 +119,17 @@ public class PeerTestJob extends JobImpl {
return;
}

TunnelInfo inTunnel = _context.tunnelManager().getTunnelInfo(inTunnelId);
RouterInfo inGateway = _context.netDb().lookupRouterInfoLocally(inTunnel.getThisHop());
TunnelInfo inTunnel = getContext().tunnelManager().getTunnelInfo(inTunnelId);
RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getThisHop());
if (inGateway == null) {
_log.error("We can't find the gateway to our inbound tunnel?! wtf");
return;
}

long timeoutMs = getTestTimeout();
long expiration = _context.clock().now() + timeoutMs;
long expiration = getContext().clock().now() + timeoutMs;

long nonce = _context.random().nextLong(I2NPMessage.MAX_ID_VALUE);
long nonce = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
DatabaseStoreMessage msg = buildMessage(peer, inTunnelId, inGateway.getIdentity().getHash(), nonce, expiration);

TunnelId outTunnelId = getOutboundTunnelId();
@ -137,7 +137,7 @@ public class PeerTestJob extends JobImpl {
_log.error("No tunnels to send search out through! wtf!");
return;
}
TunnelInfo outTunnel = _context.tunnelManager().getTunnelInfo(outTunnelId);
TunnelInfo outTunnel = getContext().tunnelManager().getTunnelInfo(outTunnelId);

if (_log.shouldLog(Log.DEBUG))
_log.debug(getJobId() + ": Sending peer test to " + peer.getIdentity().getHash().toBase64()
@ -145,12 +145,12 @@ public class PeerTestJob extends JobImpl {
+ "] via tunnel [" + msg.getReplyTunnel() + "]");

ReplySelector sel = new ReplySelector(peer.getIdentity().getHash(), nonce, expiration);
PeerReplyFoundJob reply = new PeerReplyFoundJob(_context, peer, inTunnel, outTunnel);
PeerReplyTimeoutJob timeoutJob = new PeerReplyTimeoutJob(_context, peer, inTunnel, outTunnel);
SendTunnelMessageJob j = new SendTunnelMessageJob(_context, msg, outTunnelId, peer.getIdentity().getHash(),
PeerReplyFoundJob reply = new PeerReplyFoundJob(getContext(), peer, inTunnel, outTunnel);
PeerReplyTimeoutJob timeoutJob = new PeerReplyTimeoutJob(getContext(), peer, inTunnel, outTunnel);
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, outTunnelId, peer.getIdentity().getHash(),
null, null, reply, timeoutJob, sel,
timeoutMs, TEST_PRIORITY);
_context.jobQueue().addJob(j);
getContext().jobQueue().addJob(j);

}

@ -163,7 +163,7 @@ public class PeerTestJob extends JobImpl {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(crit);
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);
if (tunnelIds.size() <= 0) {
return null;
}
@ -180,7 +180,7 @@ public class PeerTestJob extends JobImpl {
TunnelSelectionCriteria crit = new TunnelSelectionCriteria();
crit.setMaximumTunnelsRequired(1);
crit.setMinimumTunnelsRequired(1);
List tunnelIds = _context.tunnelManager().selectInboundTunnelIds(crit);
List tunnelIds = getContext().tunnelManager().selectInboundTunnelIds(crit);
if (tunnelIds.size() <= 0) {
return null;
}
@ -191,7 +191,7 @@ public class PeerTestJob extends JobImpl {
* Build a message to test the peer with
*/
private DatabaseStoreMessage buildMessage(RouterInfo peer, TunnelId replyTunnel, Hash replyGateway, long nonce, long expiration) {
DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());
msg.setKey(peer.getIdentity().getHash());
msg.setRouterInfo(peer);
msg.setReplyGateway(replyGateway);
@ -220,7 +220,7 @@ public class PeerTestJob extends JobImpl {
if (message instanceof DeliveryStatusMessage) {
DeliveryStatusMessage msg = (DeliveryStatusMessage)message;
if (_nonce == msg.getMessageId()) {
long timeLeft = _expiration - _context.clock().now();
long timeLeft = _expiration - getContext().clock().now();
if (timeLeft < 0)
_log.warn("Took too long to get a reply from peer " + _peer.toBase64()
+ ": " + (0-timeLeft) + "ms too slow");
@ -247,30 +247,30 @@ public class PeerTestJob extends JobImpl {
}
public String getName() { return "Peer test successful"; }
public void runJob() {
long responseTime = _context.clock().now() - _testBegin;
long responseTime = getContext().clock().now() - _testBegin;

if (_log.shouldLog(Log.DEBUG))
_log.debug("successful peer test after " + responseTime + " for "
+ _peer.getIdentity().getHash().toBase64() + " using outbound tunnel "
+ _sendTunnel.getTunnelId().getTunnelId() + " and inbound tunnel "
+ _replyTunnel.getTunnelId().getTunnelId());
_context.profileManager().dbLookupSuccessful(_peer.getIdentity().getHash(), responseTime);
getContext().profileManager().dbLookupSuccessful(_peer.getIdentity().getHash(), responseTime);

_sendTunnel.setLastTested(_context.clock().now());
_replyTunnel.setLastTested(_context.clock().now());
_sendTunnel.setLastTested(getContext().clock().now());
_replyTunnel.setLastTested(getContext().clock().now());

TunnelInfo cur = _replyTunnel;
while (cur != null) {
Hash peer = cur.getThisHop();
if ( (peer != null) && (!_context.routerHash().equals(peer)) )
_context.profileManager().tunnelTestSucceeded(peer, responseTime);
if ( (peer != null) && (!getContext().routerHash().equals(peer)) )
getContext().profileManager().tunnelTestSucceeded(peer, responseTime);
cur = cur.getNextHopInfo();
}
cur = _sendTunnel;
while (cur != null) {
Hash peer = cur.getThisHop();
if ( (peer != null) && (!_context.routerHash().equals(peer)) )
_context.profileManager().tunnelTestSucceeded(peer, responseTime);
if ( (peer != null) && (!getContext().routerHash().equals(peer)) )
getContext().profileManager().tunnelTestSucceeded(peer, responseTime);
cur = cur.getNextHopInfo();
}
}
@ -298,7 +298,7 @@ public class PeerTestJob extends JobImpl {
private boolean getShouldFailPeer() { return true; }
public void runJob() {
if (getShouldFailPeer())
_context.profileManager().dbLookupFailed(_peer.getIdentity().getHash());
getContext().profileManager().dbLookupFailed(_peer.getIdentity().getHash());

if (_log.shouldLog(Log.DEBUG))
_log.debug("failed peer test for "
@ -308,21 +308,21 @@ public class PeerTestJob extends JobImpl {

if (getShouldFailTunnels()) {

_sendTunnel.setLastTested(_context.clock().now());
_replyTunnel.setLastTested(_context.clock().now());
_sendTunnel.setLastTested(getContext().clock().now());
_replyTunnel.setLastTested(getContext().clock().now());

TunnelInfo cur = _replyTunnel;
while (cur != null) {
Hash peer = cur.getThisHop();
if ( (peer != null) && (!_context.routerHash().equals(peer)) )
_context.profileManager().tunnelFailed(peer);
if ( (peer != null) && (!getContext().routerHash().equals(peer)) )
getContext().profileManager().tunnelFailed(peer);
cur = cur.getNextHopInfo();
}
cur = _sendTunnel;
while (cur != null) {
Hash peer = cur.getThisHop();
if ( (peer != null) && (!_context.routerHash().equals(peer)) )
_context.profileManager().tunnelFailed(peer);
if ( (peer != null) && (!getContext().routerHash().equals(peer)) )
getContext().profileManager().tunnelFailed(peer);
cur = cur.getNextHopInfo();
}
}
@ -14,7 +14,7 @@ class PersistProfilesJob extends JobImpl {
public PersistProfilesJob(RouterContext ctx, PeerManager mgr) {
super(ctx);
_mgr = mgr;
getTiming().setStartAfter(_context.clock().now() + PERSIST_DELAY);
getTiming().setStartAfter(getContext().clock().now() + PERSIST_DELAY);
}

public String getName() { return "Persist profiles"; }
@ -24,14 +24,14 @@ class PersistProfilesJob extends JobImpl {
int i = 0;
for (Iterator iter = peers.iterator(); iter.hasNext(); )
hashes[i] = (Hash)iter.next();
_context.jobQueue().addJob(new PersistProfileJob(hashes));
getContext().jobQueue().addJob(new PersistProfileJob(hashes));
}

private class PersistProfileJob extends JobImpl {
private Hash _peers[];
private int _cur;
public PersistProfileJob(Hash peers[]) {
super(PersistProfilesJob.this._context);
super(PersistProfilesJob.this.getContext());
_peers = peers;
_cur = 0;
}
@ -42,11 +42,11 @@ class PersistProfilesJob extends JobImpl {
}
if (_cur >= _peers.length) {
// no more left, requeue up the main persist-em-all job
PersistProfilesJob.this.getTiming().setStartAfter(_context.clock().now() + PERSIST_DELAY);
PersistProfilesJob.this._context.jobQueue().addJob(PersistProfilesJob.this);
PersistProfilesJob.this.getTiming().setStartAfter(getContext().clock().now() + PERSIST_DELAY);
PersistProfilesJob.this.getContext().jobQueue().addJob(PersistProfilesJob.this);
} else {
// we've got peers left to persist, so requeue the persist profile job
PersistProfilesJob.this._context.jobQueue().addJob(PersistProfileJob.this);
PersistProfilesJob.this.getContext().jobQueue().addJob(PersistProfileJob.this);
}
}
public String getName() { return "Persist profile"; }
@ -28,23 +28,23 @@ public class BootCommSystemJob extends JobImpl {
public void runJob() {
// start up the network comm system

_context.commSystem().startup();
_context.tunnelManager().startup();
_context.peerManager().startup();
getContext().commSystem().startup();
getContext().tunnelManager().startup();
getContext().peerManager().startup();

Job bootDb = new BootNetworkDbJob(_context);
Job bootDb = new BootNetworkDbJob(getContext());
boolean useTrusted = false;
String useTrustedStr = _context.router().getConfigSetting(PROP_USE_TRUSTED_LINKS);
String useTrustedStr = getContext().router().getConfigSetting(PROP_USE_TRUSTED_LINKS);
if (useTrustedStr != null) {
useTrusted = Boolean.TRUE.toString().equalsIgnoreCase(useTrustedStr);
}
if (useTrusted) {
_log.debug("Using trusted links...");
_context.jobQueue().addJob(new BuildTrustedLinksJob(_context, bootDb));
getContext().jobQueue().addJob(new BuildTrustedLinksJob(getContext(), bootDb));
return;
} else {
_log.debug("Not using trusted links - boot db");
_context.jobQueue().addJob(bootDb);
getContext().jobQueue().addJob(bootDb);
}
}
}
@ -24,8 +24,8 @@ public class BootNetworkDbJob extends JobImpl {
public void runJob() {
// start up the network database

_context.netDb().startup();
getContext().netDb().startup();

_context.jobQueue().addJob(new StartAcceptingClientsJob(_context));
getContext().jobQueue().addJob(new StartAcceptingClientsJob(getContext()));
}
}
@ -19,7 +19,7 @@ public class BuildTrustedLinksJob extends JobImpl {

public BuildTrustedLinksJob(RouterContext context, Job next) {
super(context);
_log = _context.logManager().getLog(BuildTrustedLinksJob.class);
_log = getContext().logManager().getLog(BuildTrustedLinksJob.class);
_next = next;
}

@ -30,6 +30,6 @@ public class BuildTrustedLinksJob extends JobImpl {

//try { Thread.sleep(5000); } catch (InterruptedException ie) {}

_context.jobQueue().addJob(_next);
getContext().jobQueue().addJob(_next);
}
}
@ -41,7 +41,7 @@ public class CreateRouterInfoJob extends JobImpl {
_log.debug("Creating the new router info");
// create a new router info and store it where LoadRouterInfoJob looks
RouterInfo info = createRouterInfo();
_context.jobQueue().addJob(_next);
getContext().jobQueue().addJob(_next);
}

RouterInfo createRouterInfo() {
@ -49,10 +49,10 @@ public class CreateRouterInfoJob extends JobImpl {
FileOutputStream fos1 = null;
FileOutputStream fos2 = null;
try {
info.setAddresses(_context.commSystem().createAddresses());
info.setOptions(_context.statPublisher().publishStatistics());
info.setAddresses(getContext().commSystem().createAddresses());
info.setOptions(getContext().statPublisher().publishStatistics());
info.setPeers(new HashSet());
info.setPublished(getCurrentPublishDate(_context));
info.setPublished(getCurrentPublishDate(getContext()));
RouterIdentity ident = new RouterIdentity();
Certificate cert = new Certificate();
cert.setCertificateType(Certificate.CERTIFICATE_TYPE_NULL);
@ -62,10 +62,10 @@ public class CreateRouterInfoJob extends JobImpl {
PrivateKey privkey = null;
SigningPublicKey signingPubKey = null;
SigningPrivateKey signingPrivKey = null;
Object keypair[] = _context.keyGenerator().generatePKIKeypair();
Object keypair[] = getContext().keyGenerator().generatePKIKeypair();
pubkey = (PublicKey)keypair[0];
privkey = (PrivateKey)keypair[1];
Object signingKeypair[] = _context.keyGenerator().generateSigningKeypair();
Object signingKeypair[] = getContext().keyGenerator().generateSigningKeypair();
signingPubKey = (SigningPublicKey)signingKeypair[0];
signingPrivKey = (SigningPrivateKey)signingKeypair[1];
ident.setPublicKey(pubkey);
@ -74,13 +74,13 @@ public class CreateRouterInfoJob extends JobImpl {

info.sign(signingPrivKey);

String infoFilename = _context.router().getConfigSetting(Router.PROP_INFO_FILENAME);
String infoFilename = getContext().router().getConfigSetting(Router.PROP_INFO_FILENAME);
if (infoFilename == null)
infoFilename = Router.PROP_INFO_FILENAME_DEFAULT;
fos1 = new FileOutputStream(infoFilename);
info.writeBytes(fos1);

String keyFilename = _context.router().getConfigSetting(Router.PROP_KEYS_FILENAME);
String keyFilename = getContext().router().getConfigSetting(Router.PROP_KEYS_FILENAME);
if (keyFilename == null)
keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT;
fos2 = new FileOutputStream(keyFilename);
@ -89,10 +89,10 @@ public class CreateRouterInfoJob extends JobImpl {
pubkey.writeBytes(fos2);
signingPubKey.writeBytes(fos2);

_context.keyManager().setSigningPrivateKey(signingPrivKey);
_context.keyManager().setSigningPublicKey(signingPubKey);
_context.keyManager().setPrivateKey(privkey);
_context.keyManager().setPublicKey(pubkey);
getContext().keyManager().setSigningPrivateKey(signingPrivKey);
getContext().keyManager().setSigningPublicKey(signingPubKey);
getContext().keyManager().setPrivateKey(privkey);
getContext().keyManager().setPublicKey(pubkey);

_log.info("Router info created and stored at " + infoFilename + " with private keys stored at " + keyFilename + " [" + info + "]");
} catch (DataFormatException dfe) {
@ -26,10 +26,10 @@ class LoadClientAppsJob extends JobImpl {
public void runJob() {
int i = 0;
while (true) {
String className = _context.router().getConfigSetting("clientApp."+i+".main");
String clientName = _context.router().getConfigSetting("clientApp."+i+".name");
String args = _context.router().getConfigSetting("clientApp."+i+".args");
String onBoot = _context.router().getConfigSetting("clientApp." + i + ".onBoot");
String className = getContext().router().getConfigSetting("clientApp."+i+".main");
String clientName = getContext().router().getConfigSetting("clientApp."+i+".name");
String args = getContext().router().getConfigSetting("clientApp."+i+".args");
String onBoot = getContext().router().getConfigSetting("clientApp." + i + ".onBoot");
boolean onStartup = false;
if (onBoot != null)
onStartup = "true".equals(onBoot) || "yes".equals(onBoot);
@ -43,7 +43,7 @@ class LoadClientAppsJob extends JobImpl {
runClient(className, clientName, argVal);
} else {
// wait 2 minutes
_context.jobQueue().addJob(new DelayedRunClient(className, clientName, argVal));
getContext().jobQueue().addJob(new DelayedRunClient(className, clientName, argVal));
}
i++;
}
@ -54,11 +54,11 @@ class LoadClientAppsJob extends JobImpl {
private String _clientName;
private String _args[];
public DelayedRunClient(String className, String clientName, String args[]) {
super(LoadClientAppsJob.this._context);
super(LoadClientAppsJob.this.getContext());
_className = className;
_clientName = clientName;
_args = args;
getTiming().setStartAfter(LoadClientAppsJob.this._context.clock().now() + STARTUP_DELAY);
getTiming().setStartAfter(LoadClientAppsJob.this.getContext().clock().now() + STARTUP_DELAY);
}
public String getName() { return "Delayed client job"; }
public void runJob() {
@ -39,26 +39,26 @@ public class LoadRouterInfoJob extends JobImpl {
public void runJob() {
loadRouterInfo();
if (_us == null) {
RebuildRouterInfoJob r = new RebuildRouterInfoJob(_context);
RebuildRouterInfoJob r = new RebuildRouterInfoJob(getContext());
r.rebuildRouterInfo(false);
_context.jobQueue().addJob(this);
getContext().jobQueue().addJob(this);
return;
} else {
_context.router().setRouterInfo(_us);
_context.messageHistory().initialize(true);
_context.jobQueue().addJob(new BootCommSystemJob(_context));
getContext().router().setRouterInfo(_us);
getContext().messageHistory().initialize(true);
getContext().jobQueue().addJob(new BootCommSystemJob(getContext()));
}
}

private void loadRouterInfo() {
String routerInfoFile = _context.router().getConfigSetting(Router.PROP_INFO_FILENAME);
String routerInfoFile = getContext().router().getConfigSetting(Router.PROP_INFO_FILENAME);
if (routerInfoFile == null)
routerInfoFile = Router.PROP_INFO_FILENAME_DEFAULT;
RouterInfo info = null;
boolean failedRead = false;


String keyFilename = _context.router().getConfigSetting(Router.PROP_KEYS_FILENAME);
String keyFilename = getContext().router().getConfigSetting(Router.PROP_KEYS_FILENAME);
if (keyFilename == null)
keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT;

@ -90,10 +90,10 @@ public class LoadRouterInfoJob extends JobImpl {
SigningPublicKey signingPubKey = new SigningPublicKey();
signingPubKey.readBytes(fis2);

_context.keyManager().setPrivateKey(privkey);
_context.keyManager().setSigningPrivateKey(signingPrivKey);
_context.keyManager().setPublicKey(pubkey); //info.getIdentity().getPublicKey());
_context.keyManager().setSigningPublicKey(signingPubKey); // info.getIdentity().getSigningPublicKey());
getContext().keyManager().setPrivateKey(privkey);
getContext().keyManager().setSigningPrivateKey(signingPrivKey);
getContext().keyManager().setPublicKey(pubkey); //info.getIdentity().getPublicKey());
getContext().keyManager().setSigningPublicKey(signingPubKey); // info.getIdentity().getSigningPublicKey());
}

_us = info;
@ -33,15 +33,15 @@ public class ReadConfigJob extends JobImpl {
public String getName() { return "Read Router Configuration"; }
public void runJob() {
if (shouldReread()) {
doRead(_context);
_lastRead = _context.clock().now();
doRead(getContext());
_lastRead = getContext().clock().now();
}
getTiming().setStartAfter(_context.clock().now() + DELAY);
_context.jobQueue().addJob(this);
getTiming().setStartAfter(getContext().clock().now() + DELAY);
getContext().jobQueue().addJob(this);
}

private boolean shouldReread() {
File configFile = new File(_context.router().getConfigFilename());
File configFile = new File(getContext().router().getConfigFilename());
if (!configFile.exists()) return false;
if (configFile.lastModified() > _lastRead)
return true;
@ -49,13 +49,13 @@ public class RebuildRouterInfoJob extends JobImpl {

public void runJob() {
_log.debug("Testing to rebuild router info");
String infoFile = _context.router().getConfigSetting(Router.PROP_INFO_FILENAME);
String infoFile = getContext().router().getConfigSetting(Router.PROP_INFO_FILENAME);
if (infoFile == null) {
_log.debug("Info filename not configured, defaulting to " + Router.PROP_INFO_FILENAME_DEFAULT);
infoFile = Router.PROP_INFO_FILENAME_DEFAULT;
}

String keyFilename = _context.router().getConfigSetting(Router.PROP_KEYS_FILENAME);
String keyFilename = getContext().router().getConfigSetting(Router.PROP_KEYS_FILENAME);
if (keyFilename == null)
keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT;
File keyFile = new File(keyFilename);
@ -67,8 +67,8 @@ public class RebuildRouterInfoJob extends JobImpl {
} else {
_log.debug("Router info file [" + info.getAbsolutePath() + "] exists, not rebuilding");
}
getTiming().setStartAfter(_context.clock().now() + REBUILD_DELAY);
_context.jobQueue().addJob(this);
getTiming().setStartAfter(getContext().clock().now() + REBUILD_DELAY);
getContext().jobQueue().addJob(this);
}

void rebuildRouterInfo() {
@ -78,18 +78,18 @@ public class RebuildRouterInfoJob extends JobImpl {
_log.debug("Rebuilding the new router info");
boolean fullRebuild = false;
RouterInfo info = null;
String infoFilename = _context.router().getConfigSetting(Router.PROP_INFO_FILENAME);
String infoFilename = getContext().router().getConfigSetting(Router.PROP_INFO_FILENAME);
if (infoFilename == null)
infoFilename = Router.PROP_INFO_FILENAME_DEFAULT;

String keyFilename = _context.router().getConfigSetting(Router.PROP_KEYS_FILENAME);
String keyFilename = getContext().router().getConfigSetting(Router.PROP_KEYS_FILENAME);
if (keyFilename == null)
keyFilename = Router.PROP_KEYS_FILENAME_DEFAULT;
File keyFile = new File(keyFilename);

if (keyFile.exists()) {
// ok, no need to rebuild a brand new identity, just update what we can
info = _context.router().getRouterInfo();
info = getContext().router().getRouterInfo();
if (info == null) {
info = new RouterInfo();
FileInputStream fis = null;
@ -121,12 +121,12 @@ public class RebuildRouterInfoJob extends JobImpl {
}

try {
info.setAddresses(_context.commSystem().createAddresses());
info.setOptions(_context.statPublisher().publishStatistics());
info.setAddresses(getContext().commSystem().createAddresses());
info.setOptions(getContext().statPublisher().publishStatistics());
// info.setPeers(new HashSet()); // this would have the trusted peers
info.setPublished(CreateRouterInfoJob.getCurrentPublishDate(_context));
info.setPublished(CreateRouterInfoJob.getCurrentPublishDate(getContext()));

info.sign(_context.keyManager().getSigningPrivateKey());
info.sign(getContext().keyManager().getSigningPrivateKey());
} catch (DataFormatException dfe) {
_log.error("Error rebuilding the new router info", dfe);
return;
@ -147,13 +147,13 @@ public class RebuildRouterInfoJob extends JobImpl {
} else {
_log.warn("Private key file " + keyFile.getAbsolutePath() + " deleted! Rebuilding a brand new router identity!");
// this proc writes the keys and info to the file as well as builds the latest and greatest info
CreateRouterInfoJob j = new CreateRouterInfoJob(_context, null);
CreateRouterInfoJob j = new CreateRouterInfoJob(getContext(), null);
info = j.createRouterInfo();
fullRebuild = true;
}

//MessageHistory.initialize();
_context.router().setRouterInfo(info);
getContext().router().setRouterInfo(info);
_log.info("Router info rebuilt and stored at " + infoFilename + " [" + info + "]");
}

@ -25,10 +25,10 @@ public class StartAcceptingClientsJob extends JobImpl {
public void runJob() {
// start up the network database

_context.clientManager().startup();
getContext().clientManager().startup();

_context.jobQueue().addJob(new ReadConfigJob(_context));
_context.jobQueue().addJob(new RebuildRouterInfoJob(_context));
_context.jobQueue().allowParallelOperation();
getContext().jobQueue().addJob(new ReadConfigJob(getContext()));
getContext().jobQueue().addJob(new RebuildRouterInfoJob(getContext()));
getContext().jobQueue().allowParallelOperation();
}
}
@ -34,10 +34,10 @@ public class StartupJob extends JobImpl {

public String getName() { return "Startup Router"; }
public void runJob() {
ReadConfigJob.doRead(_context);
new AdminManager(_context).startup();
_context.jobQueue().addJob(new LoadClientAppsJob(_context));
_context.statPublisher().startup();
_context.jobQueue().addJob(new LoadRouterInfoJob(_context));
ReadConfigJob.doRead(getContext());
new AdminManager(getContext()).startup();
getContext().jobQueue().addJob(new LoadClientAppsJob(getContext()));
getContext().statPublisher().startup();
getContext().jobQueue().addJob(new LoadRouterInfoJob(getContext()));
}
}
@ -38,13 +38,13 @@ public class GetBidsJob extends JobImpl {
public String getName() { return "Fetch bids for a message to be delivered"; }
public void runJob() {
Hash to = _msg.getTarget().getIdentity().getHash();
if (_context.shitlist().isShitlisted(to)) {
if (getContext().shitlist().isShitlisted(to)) {
_log.warn("Attempt to send a message to a shitlisted peer - " + to);
fail();
return;
}

Hash us = _context.routerHash();
Hash us = getContext().routerHash();
if (_msg.getTarget().getIdentity().getHash().equals(us)) {
_log.error("wtf, send a message to ourselves? nuh uh. msg = " + _msg, getAddedBy());
fail();
@ -64,17 +64,17 @@ public class GetBidsJob extends JobImpl {

private void fail() {
if (_msg.getOnFailedSendJob() != null) {
_context.jobQueue().addJob(_msg.getOnFailedSendJob());
getContext().jobQueue().addJob(_msg.getOnFailedSendJob());
}
if (_msg.getOnFailedReplyJob() != null) {
_context.jobQueue().addJob(_msg.getOnFailedReplyJob());
getContext().jobQueue().addJob(_msg.getOnFailedReplyJob());
}
MessageSelector selector = _msg.getReplySelector();
if (selector != null) {
_context.messageRegistry().unregisterPending(_msg);
getContext().messageRegistry().unregisterPending(_msg);
}

_context.profileManager().messageFailed(_msg.getTarget().getIdentity().getHash());
getContext().profileManager().messageFailed(_msg.getTarget().getIdentity().getHash());

_msg.discardData();
}
@ -110,18 +110,18 @@ public class VMCommSystem extends CommSystemFacade {
try {
I2NPMessage msg = handler.readMessage(new ByteArrayInputStream(_msg));
int size = _msg.length;
InNetMessage inMsg = new InNetMessage(ReceiveJob.this._context);
InNetMessage inMsg = new InNetMessage(ReceiveJob.this.getContext());
inMsg.setFromRouterHash(_from);
inMsg.setMessage(msg);
_ctx.profileManager().messageReceived(_from, "vm", 1, size);
_ctx.statManager().addRateData("transport.receiveMessageSize", size, 1);

if (size < 1024)
ReceiveJob.this._context.statManager().addRateData("transport.receiveMessageSmall", 1, 1);
ReceiveJob.this.getContext().statManager().addRateData("transport.receiveMessageSmall", 1, 1);
else if (size <= 4096)
ReceiveJob.this._context.statManager().addRateData("transport.receiveMessageMedium", 1, 1);
ReceiveJob.this.getContext().statManager().addRateData("transport.receiveMessageMedium", 1, 1);
else
ReceiveJob.this._context.statManager().addRateData("transport.receiveMessageLarge", 1, 1);
ReceiveJob.this.getContext().statManager().addRateData("transport.receiveMessageLarge", 1, 1);

_ctx.inNetMessagePool().add(inMsg);
} catch (Exception e) {
@ -92,7 +92,7 @@ class ClientLeaseSetManagerJob extends JobImpl {
_log.info("Tunnels changed from the old leaseSet - request a new one: [pool = "
+ _pool.getInboundTunnelIds() + " old leaseSet: " + _currentLeaseSet);
requestNewLeaseSet();
} else if (_context.clock().now() > _lastCreated + _pool.getClientSettings().getInboundDuration()) {
} else if (getContext().clock().now() > _lastCreated + _pool.getClientSettings().getInboundDuration()) {
if (_log.shouldLog(Log.INFO))
_log.info("We've exceeded the client's requested duration (limit = "
+ new Date(_lastCreated + _pool.getClientSettings().getInboundDuration())
@ -154,7 +154,7 @@ class ClientLeaseSetManagerJob extends JobImpl {
*/
private void requestNewLeaseSet() {
LeaseSet proposed = buildNewLeaseSet();
_context.clientManager().requestLeaseSet(_pool.getDestination(), proposed,
getContext().clientManager().requestLeaseSet(_pool.getDestination(), proposed,
REQUEST_LEASE_TIMEOUT, new LeaseSetCreatedJob(),
null);
}
@ -165,7 +165,7 @@ class ClientLeaseSetManagerJob extends JobImpl {
private LeaseSet buildNewLeaseSet() {
LeaseSet ls = new LeaseSet();
TreeMap tunnels = new TreeMap();
long now = _context.clock().now();
long now = getContext().clock().now();
for (Iterator iter = _pool.getInboundTunnelIds().iterator(); iter.hasNext(); ) {
TunnelId id = (TunnelId)iter.next();
TunnelInfo info = _pool.getInboundTunnel(id);
@ -175,7 +175,7 @@ class ClientLeaseSetManagerJob extends JobImpl {
long exp = info.getSettings().getExpiration();
if (now + RECHECK_DELAY + REQUEST_LEASE_TIMEOUT > exp)
continue;
RouterInfo ri = _context.netDb().lookupRouterInfoLocally(info.getThisHop());
RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(info.getThisHop());
if (ri == null)
continue;

@ -205,11 +205,11 @@ class ClientLeaseSetManagerJob extends JobImpl {

private class LeaseSetCreatedJob extends JobImpl {
public LeaseSetCreatedJob() {
super(ClientLeaseSetManagerJob.this._context);
super(ClientLeaseSetManagerJob.this.getContext());
}
public String getName() { return "LeaseSet created"; }
public void runJob() {
RouterContext ctx = ClientLeaseSetManagerJob.this._context;
RouterContext ctx = ClientLeaseSetManagerJob.this.getContext();
LeaseSet ls = ctx.netDb().lookupLeaseSetLocally(_pool.getDestination().calculateHash());
if (ls != null) {
_log.info("New leaseSet completely created");
@ -33,7 +33,7 @@ class ClientTunnelPoolExpirationJob extends JobImpl {
_log = context.logManager().getLog(ClientTunnelPoolExpirationJob.class);
_pool = pool;
_tunnelPool = tunnelPool;
getTiming().setStartAfter(_context.clock().now() + EXPIRE_POOL_DELAY);
getTiming().setStartAfter(getContext().clock().now() + EXPIRE_POOL_DELAY);
}
public String getName() { return "Expire Pooled Client Tunnels"; }
public void runJob() {
@ -65,7 +65,7 @@ class ClientTunnelPoolExpirationJob extends JobImpl {
*
*/
public void expireInactiveTunnels() {
long now = _context.clock().now();
long now = getContext().clock().now();
long expire = now - EXPIRE_BUFFER - 2*Router.CLOCK_FUDGE_FACTOR;

for (Iterator iter = _pool.getInactiveInboundTunnelIds().iterator(); iter.hasNext(); ) {
@ -92,7 +92,7 @@ class ClientTunnelPoolExpirationJob extends JobImpl {
*
*/
public void expireActiveTunnels() {
long now = _context.clock().now();
long now = getContext().clock().now();
long expire = now - EXPIRE_BUFFER - 2*Router.CLOCK_FUDGE_FACTOR;

for (Iterator iter = _pool.getInboundTunnelIds().iterator(); iter.hasNext(); ) {
@ -41,7 +41,7 @@ class ClientTunnelPoolManagerJob extends JobImpl {
return;
}

if (!_context.clientManager().isLocal(_clientPool.getDestination())) {
if (!getContext().clientManager().isLocal(_clientPool.getDestination())) {
if (_log.shouldLog(Log.INFO))
_log.info("Client " + _clientPool.getDestination().calculateHash()
+ " is no longer connected, stop the pool");
@ -64,7 +64,7 @@ class ClientTunnelPoolManagerJob extends JobImpl {
* The pool is stopped, so lets see if we should keep doing anything
*/
private void handleStopped() {
if (_context.clientManager().isLocal(_clientPool.getDestination())) {
if (getContext().clientManager().isLocal(_clientPool.getDestination())) {
// it was stopped, but they've reconnected, so boot 'er up again
if (_log.shouldLog(Log.INFO))
_log.info("Client " + _clientPool.getDestination().calculateHash().toBase64()
@ -176,7 +176,7 @@ class ClientTunnelPoolManagerJob extends JobImpl {
}

// (furthest in the future) - (rebuild buffer time)
long expireAfter = _context.clock().now() + _tunnelPool.getPoolSettings().getInboundDuration()
long expireAfter = getContext().clock().now() + _tunnelPool.getPoolSettings().getInboundDuration()
- POOL_CHECK_DELAY - _tunnelPool.getTunnelCreationTimeout()*2;
if (info.getSettings().getExpiration() <= expireAfter) {
if (_log.shouldLog(Log.DEBUG))
@ -240,7 +240,7 @@ class ClientTunnelPoolManagerJob extends JobImpl {
*/
private void requestCustomTunnels(int numTunnels) {
for (int i = 0; i < numTunnels; i++) {
_context.jobQueue().addJob(new RequestCustomTunnelJob());
getContext().jobQueue().addJob(new RequestCustomTunnelJob());
}
}

@ -251,13 +251,13 @@ class ClientTunnelPoolManagerJob extends JobImpl {
*/
private class RequestCustomTunnelJob extends JobImpl {
public RequestCustomTunnelJob() {
super(ClientTunnelPoolManagerJob.this._context);
super(ClientTunnelPoolManagerJob.this.getContext());
}
public String getName() { return "Request Custom Client Tunnel"; }
public void runJob() {
TunnelInfo tunnelGateway = _tunnelBuilder.configureInboundTunnel(_clientPool.getDestination(), _clientPool.getClientSettings());
RequestTunnelJob reqJob = new RequestTunnelJob(RequestCustomTunnelJob.this._context, _tunnelPool, tunnelGateway, true, _tunnelPool.getTunnelCreationTimeout());
RequestCustomTunnelJob.this._context.jobQueue().addJob(reqJob);
RequestTunnelJob reqJob = new RequestTunnelJob(RequestCustomTunnelJob.this.getContext(), _tunnelPool, tunnelGateway, true, _tunnelPool.getTunnelCreationTimeout());
RequestCustomTunnelJob.this.getContext().jobQueue().addJob(reqJob);
}
}
}
@ -52,23 +52,23 @@ public class HandleTunnelCreateMessageJob extends JobImpl {
sendReply(false);
return;
}
TunnelInfo info = new TunnelInfo(_context);
TunnelInfo info = new TunnelInfo(getContext());
info.setConfigurationKey(_message.getConfigurationKey());
info.setEncryptionKey(_message.getTunnelKey());
info.setNextHop(_message.getNextRouter());

TunnelSettings settings = new TunnelSettings(_context);
TunnelSettings settings = new TunnelSettings(getContext());
settings.setBytesPerMinuteAverage(_message.getMaxAvgBytesPerMin());
settings.setBytesPerMinutePeak(_message.getMaxPeakBytesPerMin());
settings.setMessagesPerMinuteAverage(_message.getMaxAvgMessagesPerMin());
settings.setMessagesPerMinutePeak(_message.getMaxPeakMessagesPerMin());
settings.setExpiration(_message.getTunnelDurationSeconds()*1000+_context.clock().now());
settings.setExpiration(_message.getTunnelDurationSeconds()*1000+getContext().clock().now());
settings.setIncludeDummy(_message.getIncludeDummyTraffic());
settings.setReorder(_message.getReorderMessages());
info.setSettings(settings);

info.setSigningKey(_message.getVerificationPrivateKey());
info.setThisHop(_context.routerHash());
info.setThisHop(getContext().routerHash());
info.setTunnelId(_message.getTunnelId());
info.setVerificationKey(_message.getVerificationPublicKey());

@ -76,17 +76,17 @@ public class HandleTunnelCreateMessageJob extends JobImpl {

if (_message.getNextRouter() == null) {
if (_log.shouldLog(Log.DEBUG)) _log.debug("We're the endpoint, don't test the \"next\" peer [duh]");
boolean ok = _context.tunnelManager().joinTunnel(info);
boolean ok = getContext().tunnelManager().joinTunnel(info);
sendReply(ok);
} else {
_context.netDb().lookupRouterInfo(info.getNextHop(), new TestJob(info), new JoinJob(info, false), TIMEOUT);
getContext().netDb().lookupRouterInfo(info.getNextHop(), new TestJob(info), new JoinJob(info, false), TIMEOUT);
}
}

private boolean isOverloaded() {
boolean shouldAccept = _context.throttle().acceptTunnelRequest(_message);
boolean shouldAccept = getContext().throttle().acceptTunnelRequest(_message);
if (!shouldAccept) {
_context.statManager().addRateData("tunnel.rejectOverloaded", 1, 1);
getContext().statManager().addRateData("tunnel.rejectOverloaded", 1, 1);
if (_log.shouldLog(Log.INFO))
_log.info("Refusing tunnel request due to overload");
}
@ -96,13 +96,13 @@ public class HandleTunnelCreateMessageJob extends JobImpl {
private class TestJob extends JobImpl {
private TunnelInfo _target;
public TestJob(TunnelInfo target) {
super(HandleTunnelCreateMessageJob.this._context);
super(HandleTunnelCreateMessageJob.this.getContext());
_target = target;
}

public String getName() { return "Run a test for peer reachability"; }
public void runJob() {
RouterInfo info = TestJob.this._context.netDb().lookupRouterInfoLocally(_target.getNextHop());
RouterInfo info = TestJob.this.getContext().netDb().lookupRouterInfoLocally(_target.getNextHop());
if (info == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error - unable to look up peer " + _target.toBase64() + ", even though we were queued up via onSuccess??");
@ -110,11 +110,11 @@ public class HandleTunnelCreateMessageJob extends JobImpl {
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Lookup successful for tested peer " + _target.toBase64() + ", now continue with the test");
Hash peer = TestJob.this._context.routerHash();
Hash peer = TestJob.this.getContext().routerHash();
JoinJob success = new JoinJob(_target, true);
JoinJob failure = new JoinJob(_target, false);
BuildTestMessageJob test = new BuildTestMessageJob(TestJob.this._context, info, peer, success, failure, TIMEOUT, PRIORITY);
TestJob.this._context.jobQueue().addJob(test);
BuildTestMessageJob test = new BuildTestMessageJob(TestJob.this.getContext(), info, peer, success, failure, TIMEOUT, PRIORITY);
TestJob.this.getContext().jobQueue().addJob(test);
}
}
}
@ -125,12 +125,12 @@ public class HandleTunnelCreateMessageJob extends JobImpl {
_log.debug("Sending reply to a tunnel create of id " + _message.getTunnelId()
+ " with ok (" + ok + ") to router " + _message.getReplyBlock().getRouter().toBase64());

_context.messageHistory().receiveTunnelCreate(_message.getTunnelId(), _message.getNextRouter(),
new Date(_context.clock().now() + 1000*_message.getTunnelDurationSeconds()),
getContext().messageHistory().receiveTunnelCreate(_message.getTunnelId(), _message.getNextRouter(),
new Date(getContext().clock().now() + 1000*_message.getTunnelDurationSeconds()),
ok, _message.getReplyBlock().getRouter());

TunnelCreateStatusMessage msg = new TunnelCreateStatusMessage(_context);
msg.setFromHash(_context.routerHash());
TunnelCreateStatusMessage msg = new TunnelCreateStatusMessage(getContext());
msg.setFromHash(getContext().routerHash());
msg.setTunnelId(_message.getTunnelId());
if (ok) {
msg.setStatus(TunnelCreateStatusMessage.STATUS_SUCCESS);
@ -138,9 +138,9 @@ public class HandleTunnelCreateMessageJob extends JobImpl {
// since we don't actually check anything, this is a catch all
msg.setStatus(TunnelCreateStatusMessage.STATUS_FAILED_OVERLOADED);
}
msg.setMessageExpiration(new Date(_context.clock().now()+60*1000));
SendReplyMessageJob job = new SendReplyMessageJob(_context, _message.getReplyBlock(), msg, PRIORITY);
_context.jobQueue().addJob(job);
msg.setMessageExpiration(new Date(getContext().clock().now()+60*1000));
SendReplyMessageJob job = new SendReplyMessageJob(getContext(), _message.getReplyBlock(), msg, PRIORITY);
getContext().jobQueue().addJob(job);
}

public String getName() { return "Handle Tunnel Create Message"; }
@ -149,24 +149,24 @@ public class HandleTunnelCreateMessageJob extends JobImpl {
private TunnelInfo _info;
private boolean _isReachable;
public JoinJob(TunnelInfo info, boolean isReachable) {
super(HandleTunnelCreateMessageJob.this._context);
super(HandleTunnelCreateMessageJob.this.getContext());
_info = info;
_isReachable = isReachable;
}

public void runJob() {
if (!_isReachable) {
long before = JoinJob.this._context.clock().now();
long before = JoinJob.this.getContext().clock().now();
sendReply(false);
long after = JoinJob.this._context.clock().now();
long after = JoinJob.this.getContext().clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("JoinJob .refuse took " + (after-before) + "ms to refuse " + _info);
} else {
long before = JoinJob.this._context.clock().now();
boolean ok = JoinJob.this._context.tunnelManager().joinTunnel(_info);
long afterJoin = JoinJob.this._context.clock().now();
long before = JoinJob.this.getContext().clock().now();
boolean ok = JoinJob.this.getContext().tunnelManager().joinTunnel(_info);
long afterJoin = JoinJob.this.getContext().clock().now();
sendReply(ok);
long after = JoinJob.this._context.clock().now();
long after = JoinJob.this.getContext().clock().now();
if (_log.shouldLog(Log.DEBUG))
_log.debug("JoinJob .joinTunnel took " + (afterJoin-before) + "ms and sendReply took " + (after-afterJoin) + "ms");
}
@ -175,8 +175,8 @@ public class HandleTunnelCreateMessageJob extends JobImpl {
}

public void dropped() {
_context.messageHistory().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Dropped due to overload");
getContext().messageHistory().messageProcessingError(_message.getUniqueId(),
_message.getClass().getName(),
"Dropped due to overload");
}
}
@ -26,7 +26,7 @@ class RequestInboundTunnelJob extends JobImpl {
public void runJob() {
_log.debug("Client pool settings: " + _pool.getPoolSettings().toString());
TunnelInfo tunnelGateway = _builder.configureInboundTunnel(null, _pool.getPoolSettings(), _useFake);
RequestTunnelJob reqJob = new RequestTunnelJob(_context, _pool, tunnelGateway, true, _pool.getTunnelCreationTimeout());
_context.jobQueue().addJob(reqJob);
RequestTunnelJob reqJob = new RequestTunnelJob(getContext(), _pool, tunnelGateway, true, _pool.getTunnelCreationTimeout());
getContext().jobQueue().addJob(reqJob);
}
}
@ -19,7 +19,7 @@ class RequestOutboundTunnelJob extends JobImpl {
public String getName() { return "Request Outbound Tunnel"; }
public void runJob() {
TunnelInfo tunnelGateway = _builder.configureOutboundTunnel(_pool.getPoolSettings(), _useFake);
RequestTunnelJob reqJob = new RequestTunnelJob(_context, _pool, tunnelGateway, false, _pool.getTunnelCreationTimeout());
_context.jobQueue().addJob(reqJob);
RequestTunnelJob reqJob = new RequestTunnelJob(getContext(), _pool, tunnelGateway, false, _pool.getTunnelCreationTimeout());
getContext().jobQueue().addJob(reqJob);
}
}
@ -98,7 +98,7 @@ public class RequestTunnelJob extends JobImpl {
_toBeRequested = new ArrayList(participants.size());
for (int i = participants.size()-1; i >= 0; i--) {
TunnelInfo peer = (TunnelInfo)participants.get(i);
if (null != _context.netDb().lookupRouterInfoLocally(peer.getThisHop())) {
if (null != getContext().netDb().lookupRouterInfoLocally(peer.getThisHop())) {
_toBeRequested.add(participants.get(i));
} else {
if (_log.shouldLog(Log.WARN))

@ -109,12 +109,12 @@ public class RequestTunnelJob extends JobImpl {
// since we request serially, we need to up the timeout serially
// change this once we go parallel
//_timeoutMs *= participants.size()+1;
_expiration = (_timeoutMs * _toBeRequested.size()) + _context.clock().now();
_expiration = (_timeoutMs * _toBeRequested.size()) + getContext().clock().now();
}

public String getName() { return "Request Tunnel"; }
public void runJob() {
if (_context.clock().now() > _expiration) {
if (getContext().clock().now() > _expiration) {
if (_log.shouldLog(Log.WARN))
_log.warn("Timeout reached building tunnel (timeout = " + _timeoutMs + " expiration = " + new Date(_expiration) + ")");
fail();

@ -140,7 +140,7 @@ public class RequestTunnelJob extends JobImpl {
private void requestParticipation(TunnelInfo participant) {
// find the info about who we're looking for
RouterInfo target = _context.netDb().lookupRouterInfoLocally(participant.getThisHop());
RouterInfo target = getContext().netDb().lookupRouterInfoLocally(participant.getThisHop());
if (target == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Error - no db info known for participant " + participant.getThisHop());

@ -148,7 +148,7 @@ public class RequestTunnelJob extends JobImpl {
return;
}

if (target.getIdentity().getHash().equals(_context.routerHash())) {
if (target.getIdentity().getHash().equals(getContext().routerHash())) {
// short circuit the ok
okLocalParticipation(participant);
return;

@ -190,7 +190,7 @@ public class RequestTunnelJob extends JobImpl {
participant, inboundGateway, replyPeer,
outboundTunnel, target);
Request r = new Request(state);
_context.jobQueue().addJob(r);
getContext().jobQueue().addJob(r);
}

/**

@ -203,7 +203,7 @@ public class RequestTunnelJob extends JobImpl {
public class Request extends JobImpl {
private RequestState _state;
Request(RequestState state) {
super(RequestTunnelJob.this._context);
super(RequestTunnelJob.this.getContext());
_state = state;
}

@ -212,7 +212,7 @@ public class RequestTunnelJob extends JobImpl {
if (needsMore) {
requeue(0);
} else {
MessageHistory hist = Request.this._context.messageHistory();
MessageHistory hist = Request.this.getContext().messageHistory();
hist.requestTunnelCreate(_tunnelGateway.getTunnelId(),
_state.getOutboundTunnel(),
_state.getParticipant().getThisHop(),

@ -287,11 +287,11 @@ public class RequestTunnelJob extends JobImpl {
ReplyJob onReply = new Success(_participant, _wrappedKey, _wrappedTags, _wrappedTo);
Job onFail = new Failure(_participant, _replyPeer.getIdentity().getHash());
MessageSelector selector = new Selector(_participant, _statusMsg.getMessageId());
SendTunnelMessageJob j = new SendTunnelMessageJob(_context, _garlicMessage,
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), _garlicMessage,
_outboundTunnel, _target.getIdentity().getHash(),
null, null, onReply, onFail,
selector, _timeoutMs, PRIORITY);
_context.jobQueue().addJob(j);
getContext().jobQueue().addJob(j);
return false;
}
}

@ -320,7 +320,7 @@ public class RequestTunnelJob extends JobImpl {
crit.setLatencyPriority(50); // arbitrary
crit.setReliabilityPriority(50); // arbitrary

List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(crit);
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);
TunnelId id = null;
if (tunnelIds.size() > 0)
id = (TunnelId)tunnelIds.get(0);

@ -340,12 +340,12 @@ public class RequestTunnelJob extends JobImpl {
criteria.setMaximumRequired(1);
criteria.setMinimumRequired(1);
criteria.setPurpose(PeerSelectionCriteria.PURPOSE_SOURCE_ROUTE);
List peerHashes = _context.peerManager().selectPeers(criteria);
List peerHashes = getContext().peerManager().selectPeers(criteria);

RouterInfo peerInfo = null;
for (int i = 0; (i < peerHashes.size()) && (peerInfo == null); i++) {
Hash peerHash = (Hash)peerHashes.get(i);
peerInfo = _context.netDb().lookupRouterInfoLocally(peerHash);
peerInfo = getContext().netDb().lookupRouterInfoLocally(peerHash);
if (peerInfo == null) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Selected a peer [" + peerHash + "] we don't have info on locally... trying another");

@ -359,7 +359,7 @@ public class RequestTunnelJob extends JobImpl {
if (peerInfo == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No peers know for a reply (out of " + peerHashes.size() + ") - using ourself");
return _context.router().getRouterInfo();
return getContext().router().getRouterInfo();
} else {
return peerInfo;
}

@ -377,7 +377,7 @@ public class RequestTunnelJob extends JobImpl {
criteria.setLatencyPriority(33);
criteria.setMaximumTunnelsRequired(1);
criteria.setMinimumTunnelsRequired(1);
List ids = _context.tunnelManager().selectInboundTunnelIds(criteria);
List ids = getContext().tunnelManager().selectInboundTunnelIds(criteria);
if (ids.size() <= 0) {
if (_log.shouldLog(Log.ERROR))
_log.error("No inbound tunnels to receive the tunnel create messages. Argh",

@ -388,7 +388,7 @@ public class RequestTunnelJob extends JobImpl {
TunnelId id = null;
for (int i = 0; i < ids.size(); i++) {
id = (TunnelId)ids.get(i);
gateway = _context.tunnelManager().getTunnelInfo(id);
gateway = getContext().tunnelManager().getTunnelInfo(id);
if (gateway != null)
break;
}

@ -409,7 +409,7 @@ public class RequestTunnelJob extends JobImpl {
* Build a TunnelCreateMessage to the participant
*/
private TunnelCreateMessage buildTunnelCreate(TunnelInfo participant, TunnelGateway replyGateway, RouterInfo replyPeer) {
TunnelCreateMessage msg = new TunnelCreateMessage(_context);
TunnelCreateMessage msg = new TunnelCreateMessage(getContext());
msg.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
msg.setConfigurationKey(participant.getConfigurationKey());
msg.setIncludeDummyTraffic(participant.getSettings().getIncludeDummy());

@ -431,7 +431,7 @@ public class RequestTunnelJob extends JobImpl {
return null;

msg.setReplyBlock(replyBlock);
long duration = participant.getSettings().getExpiration() - _context.clock().now();
long duration = participant.getSettings().getExpiration() - getContext().clock().now();
if (duration == 0) duration = 1;
msg.setTunnelDurationSeconds(duration/1000);
msg.setTunnelId(participant.getTunnelId());

@ -454,12 +454,12 @@ public class RequestTunnelJob extends JobImpl {
return null;
}

SessionKey replySessionKey = _context.keyGenerator().generateSessionKey();
SessionKey replySessionKey = getContext().keyGenerator().generateSessionKey();
SessionTag tag = new SessionTag(true);
Set tags = new HashSet();
tags.add(tag);
// make it so we'll read the session tag correctly and use the right session key
_context.sessionKeyManager().tagsReceived(replySessionKey, tags);
getContext().sessionKeyManager().tagsReceived(replySessionKey, tags);

PublicKey pk = replyPeer.getIdentity().getPublicKey();

@ -473,18 +473,18 @@ public class RequestTunnelJob extends JobImpl {
instructions.setRouter(gateway.getGateway());
instructions.setTunnelId(gateway.getTunnelId());

long replyId = _context.random().nextLong(I2NPMessage.MAX_ID_VALUE);
long replyId = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);

Certificate replyCert = new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null);

long expiration = _context.clock().now() + _timeoutMs; // _expiration;
long expiration = getContext().clock().now() + _timeoutMs; // _expiration;
if (_log.shouldLog(Log.DEBUG))
_log.debug("Setting the expiration on the reply block to " + (new Date(expiration)));
SourceRouteBlock block = new SourceRouteBlock();
try {
long begin = _context.clock().now();
block.setData(_context, instructions, replyId, replyCert, expiration, pk);
long end = _context.clock().now();
long begin = getContext().clock().now();
block.setData(getContext(), instructions, replyId, replyCert, expiration, pk);
long end = getContext().clock().now();
if ( (end - begin) > 1000) {
if (_log.shouldLog(Log.WARN))
_log.warn("Took too long (" + (end-begin) + "ms) to build source route block");

@ -511,10 +511,10 @@ public class RequestTunnelJob extends JobImpl {
*
*/
private DeliveryStatusMessage buildDeliveryStatusMessage() {
DeliveryStatusMessage msg = new DeliveryStatusMessage(_context);
msg.setArrival(new Date(_context.clock().now()));
msg.setMessageId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
Date exp = new Date(_context.clock().now() + _timeoutMs); // _expiration);
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
msg.setArrival(new Date(getContext().clock().now()));
msg.setMessageId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
Date exp = new Date(getContext().clock().now() + _timeoutMs); // _expiration);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Setting the expiration on the delivery status message to " + exp);
msg.setMessageExpiration(exp);

@ -549,9 +549,9 @@ public class RequestTunnelJob extends JobImpl {
if (wrappedTo != null)
wrappedTo.setData(rcptKey.getData());

long start = _context.clock().now();
GarlicMessage message = GarlicMessageBuilder.buildMessage(_context, config, wrappedKey, wrappedTags);
long end = _context.clock().now();
long start = getContext().clock().now();
GarlicMessage message = GarlicMessageBuilder.buildMessage(getContext(), config, wrappedKey, wrappedTags);
long end = getContext().clock().now();
if ( (end - start) > 1000) {
if (_log.shouldLog(Log.WARN))
_log.warn("Took more than a second (" + (end-start) + "ms) to create the garlic for the tunnel");

@ -567,7 +567,7 @@ public class RequestTunnelJob extends JobImpl {
RouterInfo target) {
GarlicConfig config = new GarlicConfig();

long garlicExpiration = _context.clock().now() + _timeoutMs;
long garlicExpiration = getContext().clock().now() + _timeoutMs;
PayloadGarlicConfig dataClove = buildDataClove(data, target, garlicExpiration);
config.addClove(dataClove);
PayloadGarlicConfig ackClove = buildAckClove(status, replyPeer, replyTunnel, garlicExpiration);

@ -587,7 +587,7 @@ public class RequestTunnelJob extends JobImpl {
config.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
config.setDeliveryInstructions(instructions);
config.setId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
config.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
config.setExpiration(garlicExpiration);
config.setRecipientPublicKey(target.getIdentity().getPublicKey());
config.setRequestAck(false);

@ -616,7 +616,7 @@ public class RequestTunnelJob extends JobImpl {
ackClove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
ackClove.setDeliveryInstructions(ackInstructions);
ackClove.setExpiration(expiration);
ackClove.setId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
ackClove.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
ackClove.setPayload(ackMsg);
ackClove.setRecipient(replyPeer);
ackClove.setRequestAck(false);

@ -641,7 +641,7 @@ public class RequestTunnelJob extends JobImpl {
clove.setCertificate(new Certificate(Certificate.CERTIFICATE_TYPE_NULL, null));
clove.setDeliveryInstructions(instructions);
clove.setExpiration(expiration);
clove.setId(_context.random().nextLong(I2NPMessage.MAX_ID_VALUE));
clove.setId(getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE));
clove.setPayload(data);
clove.setRecipientPublicKey(null);
clove.setRequestAck(false);

@ -686,7 +686,7 @@ public class RequestTunnelJob extends JobImpl {
else
_pool.addOutboundTunnel(_tunnelGateway);
_tunnelGateway.setIsReady(true);
_context.statManager().updateFrequency("tunnel.buildFrequency");
getContext().statManager().updateFrequency("tunnel.buildFrequency");
} else {
if (_log.shouldLog(Log.DEBUG)) {
StringBuffer buf = new StringBuffer(128);

@ -695,7 +695,7 @@ public class RequestTunnelJob extends JobImpl {
buf.append(", but ").append(numLeft).append(" are pending");
_log.debug(buf.toString());
}
_context.jobQueue().addJob(this);
getContext().jobQueue().addJob(this);
}
}

@ -717,14 +717,14 @@ public class RequestTunnelJob extends JobImpl {
private long _started;

public Success(TunnelInfo tunnel, SessionKey wrappedKey, Set wrappedTags, PublicKey wrappedTo) {
super(RequestTunnelJob.this._context);
super(RequestTunnelJob.this.getContext());
_tunnel = tunnel;
_messages = new LinkedList();
_successCompleted = false;
_wrappedKey = wrappedKey;
_wrappedTags = wrappedTags;
_wrappedTo = wrappedTo;
_started = _context.clock().now();
_started = getContext().clock().now();
}

public String getName() { return "Create Tunnel Status Received"; }

@ -737,7 +737,7 @@ public class RequestTunnelJob extends JobImpl {
_messages.clear();
}

long responseTime = _context.clock().now() - _started;
long responseTime = getContext().clock().now() - _started;
for (Iterator iter = toProc.iterator(); iter.hasNext(); ) {
I2NPMessage msg = (I2NPMessage)iter.next();
process(msg, responseTime);

@ -766,8 +766,8 @@ public class RequestTunnelJob extends JobImpl {
_log.warn("Tunnel creation failed for tunnel " + _tunnel.getTunnelId()
+ " at router " + _tunnel.getThisHop().toBase64()
+ " with status " + msg.getStatus());
_context.profileManager().tunnelRejected(_tunnel.getThisHop(), responseTime);
Success.this._context.messageHistory().tunnelRejected(_tunnel.getThisHop(),
getContext().profileManager().tunnelRejected(_tunnel.getThisHop(), responseTime);
Success.this.getContext().messageHistory().tunnelRejected(_tunnel.getThisHop(),
_tunnel.getTunnelId(),
null, "refused");
fail();

@ -781,14 +781,14 @@ public class RequestTunnelJob extends JobImpl {
if ( (_wrappedKey != null) && (_wrappedKey.getData() != null) &&
(_wrappedTags != null) && (_wrappedTags.size() > 0) &&
(_wrappedTo != null) ) {
Success.this._context.sessionKeyManager().tagsDelivered(_wrappedTo, _wrappedKey, _wrappedTags);
Success.this.getContext().sessionKeyManager().tagsDelivered(_wrappedTo, _wrappedKey, _wrappedTags);
if (_log.shouldLog(Log.INFO))
_log.info("Delivered tags successfully to " + _tunnel.getThisHop().toBase64()
+ "! # tags: " + _wrappedTags.size());
}

_tunnel.setIsReady(true);
_context.profileManager().tunnelJoined(_tunnel.getThisHop(), responseTime);
getContext().profileManager().tunnelJoined(_tunnel.getThisHop(), responseTime);
peerSuccess(_tunnel);
_successCompleted = true;
break;

@ -811,10 +811,10 @@ public class RequestTunnelJob extends JobImpl {
private Hash _replyThrough;
private long _started;
public Failure(TunnelInfo tunnel, Hash replyThrough) {
super(RequestTunnelJob.this._context);
super(RequestTunnelJob.this.getContext());
_tunnel = tunnel;
_replyThrough = replyThrough;
_started = _context.clock().now();
_started = getContext().clock().now();
}

public String getName() { return "Create Tunnel Failed"; }

@ -823,19 +823,19 @@ public class RequestTunnelJob extends JobImpl {
if (_log.shouldLog(Log.WARN)) {
_log.warn("Tunnel creation timed out for tunnel " + _tunnel.getTunnelId() + " at router "
+ _tunnel.getThisHop().toBase64() + " from router "
+ _context.routerHash().toBase64() + " after waiting "
+ (_context.clock().now()-_started) + "ms");
+ getContext().routerHash().toBase64() + " after waiting "
+ (getContext().clock().now()-_started) + "ms");
_log.warn("Added by", Failure.this.getAddedBy());
}
synchronized (_failedTunnelParticipants) {
_failedTunnelParticipants.add(_tunnel.getThisHop());
_failedTunnelParticipants.add(_replyThrough);
}
Failure.this._context.messageHistory().tunnelRequestTimedOut(_tunnel.getThisHop(), _tunnel.getTunnelId(), _replyThrough);
Failure.this.getContext().messageHistory().tunnelRequestTimedOut(_tunnel.getThisHop(), _tunnel.getTunnelId(), _replyThrough);
// perhaps not an explicit reject, but an implicit one (due to overload & dropped messages, etc)
_context.profileManager().tunnelRejected(_tunnel.getThisHop(), _context.clock().now() - _started);
_context.profileManager().messageFailed(_tunnel.getThisHop());
Failure.this._context.statManager().updateFrequency("tunnel.buildFailFrequency");
getContext().profileManager().tunnelRejected(_tunnel.getThisHop(), getContext().clock().now() - _started);
getContext().profileManager().messageFailed(_tunnel.getThisHop());
Failure.this.getContext().statManager().updateFrequency("tunnel.buildFailFrequency");
fail();
}
}

@ -852,7 +852,7 @@ public class RequestTunnelJob extends JobImpl {
_ackId = ackId;
_statusFound = false;
_ackFound = false;
_attemptExpiration = _context.clock().now() + _timeoutMs;
_attemptExpiration = getContext().clock().now() + _timeoutMs;
}

public boolean continueMatching() {

@ -52,7 +52,7 @@ class TestTunnelJob extends JobImpl {
}

// mark it as something we're testing
info.setLastTested(_context.clock().now());
info.setLastTested(getContext().clock().now());
if (isOutbound(info)) {
testOutbound(info);
} else {

@ -65,7 +65,7 @@ class TestTunnelJob extends JobImpl {
_log.error("wtf, null info?", new Exception("Who checked a null tunnel info?"));
return false;
}
if (_context.routerHash().equals(info.getThisHop()))
if (getContext().routerHash().equals(info.getThisHop()))
return true;
else
return false;

@ -81,23 +81,23 @@ class TestTunnelJob extends JobImpl {
private void testOutbound(TunnelInfo info) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Testing outbound tunnel " + info);
DeliveryStatusMessage msg = new DeliveryStatusMessage(_context);
msg.setArrival(new Date(_context.clock().now()));
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
msg.setArrival(new Date(getContext().clock().now()));
msg.setMessageId(_nonce);
Hash us = _context.routerHash();
Hash us = getContext().routerHash();
_secondaryId = getReplyTunnel();
if (_secondaryId == null) {
_context.jobQueue().addJob(new TestFailedJob());
getContext().jobQueue().addJob(new TestFailedJob());
return;
}

TunnelInfo inboundInfo = _pool.getTunnelInfo(_secondaryId);
inboundInfo.setLastTested(_context.clock().now());
inboundInfo.setLastTested(getContext().clock().now());

TestFailedJob failureJob = new TestFailedJob();
MessageSelector selector = new TestMessageSelector(msg.getMessageId(), info.getTunnelId().getTunnelId());
SendTunnelMessageJob testJob = new SendTunnelMessageJob(_context, msg, info.getTunnelId(), us, _secondaryId, null, new TestSuccessfulJob(), failureJob, selector, TEST_TIMEOUT, TEST_PRIORITY);
_context.jobQueue().addJob(testJob);
SendTunnelMessageJob testJob = new SendTunnelMessageJob(getContext(), msg, info.getTunnelId(), us, _secondaryId, null, new TestSuccessfulJob(), failureJob, selector, TEST_TIMEOUT, TEST_PRIORITY);
getContext().jobQueue().addJob(testJob);
}

/**

@ -106,23 +106,23 @@ class TestTunnelJob extends JobImpl {
private void testInbound(TunnelInfo info) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Testing inbound tunnel " + info);
DeliveryStatusMessage msg = new DeliveryStatusMessage(_context);
msg.setArrival(new Date(_context.clock().now()));
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
msg.setArrival(new Date(getContext().clock().now()));
msg.setMessageId(_nonce);

_secondaryId = getOutboundTunnel();
if (_secondaryId == null) {
_context.jobQueue().addJob(new TestFailedJob());
getContext().jobQueue().addJob(new TestFailedJob());
return;
}

TunnelInfo outboundInfo = _pool.getTunnelInfo(_secondaryId);
outboundInfo.setLastTested(_context.clock().now());
outboundInfo.setLastTested(getContext().clock().now());

TestFailedJob failureJob = new TestFailedJob();
MessageSelector selector = new TestMessageSelector(msg.getMessageId(), info.getTunnelId().getTunnelId());
SendTunnelMessageJob j = new SendTunnelMessageJob(_context, msg, _secondaryId, info.getThisHop(), info.getTunnelId(), null, new TestSuccessfulJob(), failureJob, selector, TEST_TIMEOUT, TEST_PRIORITY);
_context.jobQueue().addJob(j);
SendTunnelMessageJob j = new SendTunnelMessageJob(getContext(), msg, _secondaryId, info.getThisHop(), info.getTunnelId(), null, new TestSuccessfulJob(), failureJob, selector, TEST_TIMEOUT, TEST_PRIORITY);
getContext().jobQueue().addJob(j);
}

/**

@ -137,7 +137,7 @@ class TestTunnelJob extends JobImpl {
crit.setAnonymityPriority(50);
crit.setLatencyPriority(50);
crit.setReliabilityPriority(50);
List tunnelIds = _context.tunnelManager().selectInboundTunnelIds(crit);
List tunnelIds = getContext().tunnelManager().selectInboundTunnelIds(crit);

for (int i = 0; i < tunnelIds.size(); i++) {
TunnelId id = (TunnelId)tunnelIds.get(i);

@ -165,7 +165,7 @@ class TestTunnelJob extends JobImpl {
crit.setAnonymityPriority(50);
crit.setLatencyPriority(50);
crit.setReliabilityPriority(50);
List tunnelIds = _context.tunnelManager().selectOutboundTunnelIds(crit);
List tunnelIds = getContext().tunnelManager().selectOutboundTunnelIds(crit);

for (int i = 0; i < tunnelIds.size(); i++) {
TunnelId id = (TunnelId)tunnelIds.get(i);

@ -183,7 +183,7 @@ class TestTunnelJob extends JobImpl {
private class TestFailedJob extends JobImpl {
public TestFailedJob() {
super(TestTunnelJob.this._context);
super(TestTunnelJob.this.getContext());
}

public String getName() { return "Tunnel Test Failed"; }

@ -206,36 +206,36 @@ class TestTunnelJob extends JobImpl {
private class TestSuccessfulJob extends JobImpl implements ReplyJob {
private DeliveryStatusMessage _msg;
public TestSuccessfulJob() {
super(TestTunnelJob.this._context);
super(TestTunnelJob.this.getContext());
_msg = null;
}

public String getName() { return "Tunnel Test Successful"; }
public void runJob() {
long time = (_context.clock().now() - _msg.getArrival().getTime());
long time = (getContext().clock().now() - _msg.getArrival().getTime());
if (_log.shouldLog(Log.INFO))
_log.info("Test of tunnel " + _primaryId+ " successfull after "
+ time + "ms waiting for " + _nonce);
TunnelInfo info = _pool.getTunnelInfo(_primaryId);
if (info != null) {
TestTunnelJob.this._context.messageHistory().tunnelValid(info, time);
TestTunnelJob.this.getContext().messageHistory().tunnelValid(info, time);
updateProfiles(info, time);
}

info = _pool.getTunnelInfo(_secondaryId);
if (info != null) {
TestTunnelJob.this._context.messageHistory().tunnelValid(info, time);
TestTunnelJob.this.getContext().messageHistory().tunnelValid(info, time);
updateProfiles(info, time);
}
_context.statManager().addRateData("tunnel.testSuccessTime", time, time);
getContext().statManager().addRateData("tunnel.testSuccessTime", time, time);
}

private void updateProfiles(TunnelInfo info, long time) {
TunnelInfo cur = info;
while (cur != null) {
Hash peer = cur.getThisHop();
if ( (peer != null) && (!_context.routerHash().equals(peer)) )
_context.profileManager().tunnelTestSucceeded(peer, time);
if ( (peer != null) && (!getContext().routerHash().equals(peer)) )
getContext().profileManager().tunnelTestSucceeded(peer, time);
cur = cur.getNextHopInfo();
}
}

@ -254,7 +254,7 @@ class TestTunnelJob extends JobImpl {
_id = id;
_tunnelId = tunnelId;
_found = false;
_expiration = _context.clock().now() + TEST_TIMEOUT;
_expiration = getContext().clock().now() + TEST_TIMEOUT;
if (_log.shouldLog(Log.DEBUG))
_log.debug("the expiration while testing tunnel " + tunnelId
+ " waiting for nonce " + id + ": " + new Date(_expiration));

@ -270,7 +270,7 @@ class TestTunnelJob extends JobImpl {
return !_found;
}
public long getExpiration() {
if (_expiration < _context.clock().now()) {
if (_expiration < getContext().clock().now()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("EXPIRED while looking for nonce " + _id + " for tunnel " + _tunnelId);
}

@ -282,7 +282,7 @@ class TestTunnelJob extends JobImpl {
if (msg.getMessageId() == _id) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Found successful test of tunnel " + _tunnelId + " after "
+ (_context.clock().now() - msg.getArrival().getTime())
+ (getContext().clock().now() - msg.getArrival().getTime())
+ "ms waiting for " + _id);
_found = true;
return true;

@ -301,7 +301,7 @@ class TestTunnelJob extends JobImpl {
buf.append(super.toString());
buf.append(": TestMessageSelector: tunnel ").append(_tunnelId);
buf.append(" looking for ").append(_id).append(" expiring in ");
buf.append(_expiration - _context.clock().now());
buf.append(_expiration - getContext().clock().now());
buf.append("ms");
return buf.toString();
}

@ -31,7 +31,7 @@ class TunnelPoolExpirationJob extends JobImpl {
super(ctx);
_log = ctx.logManager().getLog(TunnelPoolExpirationJob.class);
_pool = pool;
getTiming().setStartAfter(_context.clock().now() + EXPIRE_POOL_DELAY);
getTiming().setStartAfter(getContext().clock().now() + EXPIRE_POOL_DELAY);
}
public String getName() { return "Expire Pooled Tunnels"; }
public void runJob() {

@ -50,7 +50,7 @@ class TunnelPoolExpirationJob extends JobImpl {
*
*/
public void expireFree() {
long now = _context.clock().now();
long now = getContext().clock().now();
long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR;

for (Iterator iter = _pool.getFreeTunnels().iterator(); iter.hasNext(); ) {

@ -78,7 +78,7 @@ class TunnelPoolExpirationJob extends JobImpl {
*
*/
public void expireOutbound() {
long now = _context.clock().now();
long now = getContext().clock().now();
long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR;

for (Iterator iter = _pool.getOutboundTunnels().iterator(); iter.hasNext(); ) {

@ -105,7 +105,7 @@ class TunnelPoolExpirationJob extends JobImpl {
*
*/
public void expireParticipants() {
long now = _context.clock().now();
long now = getContext().clock().now();
long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR;

for (Iterator iter = _pool.getParticipatingTunnels().iterator(); iter.hasNext(); ) {

@ -127,7 +127,7 @@ class TunnelPoolExpirationJob extends JobImpl {
*
*/
public void expirePending() {
long now = _context.clock().now();
long now = getContext().clock().now();
long expire = now - EXPIRE_BUFFER - Router.CLOCK_FUDGE_FACTOR;

for (Iterator iter = _pool.getPendingTunnels().iterator(); iter.hasNext(); ) {

@ -110,7 +110,7 @@ class TunnelPoolManagerJob extends JobImpl {
Set freeTunnels = _pool.getFreeTunnels();
int free = 0;
int minLength = _pool.getPoolSettings().getDepthInbound();
long mustExpireAfter = _context.clock().now() + EXPIRE_FUDGE_PERIOD;
long mustExpireAfter = getContext().clock().now() + EXPIRE_FUDGE_PERIOD;
for (Iterator iter = freeTunnels.iterator(); iter.hasNext(); ) {
TunnelId id = (TunnelId)iter.next();
TunnelInfo info = _pool.getFreeTunnel(id);

@ -148,7 +148,7 @@ class TunnelPoolManagerJob extends JobImpl {
private int getOutboundTunnelCount() {
Set outboundTunnels = _pool.getOutboundTunnels();
int outbound = 0;
long mustExpireAfter = _context.clock().now() + EXPIRE_FUDGE_PERIOD;
long mustExpireAfter = getContext().clock().now() + EXPIRE_FUDGE_PERIOD;
for (Iterator iter = outboundTunnels.iterator(); iter.hasNext(); ) {
TunnelId id = (TunnelId)iter.next();
TunnelInfo info = _pool.getOutboundTunnel(id);

@ -166,12 +166,12 @@ class TunnelPoolManagerJob extends JobImpl {
private void requestInboundTunnels(int numTunnelsToRequest) {
_log.info("Requesting " + numTunnelsToRequest + " inbound tunnels");
for (int i = 0; i < numTunnelsToRequest; i++)
_context.jobQueue().addJob(new RequestInboundTunnelJob(_context, _pool, false));
getContext().jobQueue().addJob(new RequestInboundTunnelJob(getContext(), _pool, false));
}

private void requestOutboundTunnels(int numTunnelsToRequest) {
_log.info("Requesting " + numTunnelsToRequest + " outbound tunnels");
for (int i = 0; i < numTunnelsToRequest; i++)
_context.jobQueue().addJob(new RequestOutboundTunnelJob(_context, _pool, false));
getContext().jobQueue().addJob(new RequestOutboundTunnelJob(getContext(), _pool, false));
}
}
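Editor's note: every hunk above makes the same mechanical substitution, so a small illustrative sketch may help when reading the pairs of old/new lines. Subclasses of JobImpl (and their inner job classes) stop reading the now-private _context field directly and call the getContext() accessor instead. ExampleJob below is hypothetical and not part of this commit; the import paths are assumed from the class names visible in the diff.

import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;

// Illustrative only: shows the accessor pattern applied throughout this commit.
class ExampleJob extends JobImpl {
    public ExampleJob(RouterContext ctx) {
        super(ctx); // the base class stores the context in a private field
    }
    public String getName() { return "Example"; }
    public void runJob() {
        // before this change: long now = _context.clock().now();
        long now = getContext().clock().now(); // after: always go through the accessor
    }
}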